Example #1
def cifar_resnet():
    dev = 0
    cntk_dev = cntk_device(dev)
    epoch_size = sys.maxsize
    mbs = create_mb_source(epoch_size)
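    # Find the feature and label stream descriptors exposed by the reader.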
    stream_infos = mbs.stream_infos()
    for si in stream_infos:
        if si.m_name == 'features':
            features_si = si
        elif si.m_name == 'labels':
            labels_si = si

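    # The sample layout lists the channel axis last; move it to the front.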
    image_shape = features_si.m_sample_layout.dimensions()
    image_shape = (image_shape[2], image_shape[0], image_shape[1])

    num_classes = labels_si.m_sample_layout.dimensions()[0]

    image_input = variable(image_shape,
                           features_si.m_element_type,
                           needs_gradient=False,
                           name="Images")
    classifier_output = resnet_classifer(image_input, num_classes, dev,
                                         "classifierOutput")
    label_var = variable((num_classes,),
                         labels_si.m_element_type,
                         needs_gradient=False,
                         name="Labels")

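    # Loss (cross-entropy with softmax) and evaluation metric (classification error).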
    ce = cross_entropy_with_softmax(classifier_output, label_var)
    pe = classification_error(classifier_output, label_var)

    #TODO: add save and load module code
    image_classifier = combine([ce, pe, classifier_output], "ImageClassifier")

    lr = learning_rates_per_sample(0.0078125)

    mb_size = 32
    num_mbs = 1000

    trainer = Trainer(classifier_output, ce, pe,
                      [sgdlearner(classifier_output.owner.parameters(), lr)])

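    # Training loop: train on one minibatch per iteration and report the loss
    # and metric every 20 iterations.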
    for i in range(0, num_mbs):
        mb = mbs.get_next_minibatch(mb_size, cntk_dev)

        arguments = dict()
        arguments[image_input] = mb[features_si].m_data
        arguments[label_var] = mb[labels_si].m_data

        trainer.train_minibatch(arguments, cntk_dev)
        freq = 20
        if i % freq == 0:
            training_loss = get_train_loss(trainer)
            eval_crit = get_train_eval_criterion(trainer)
            print(
                "Minibatch: {}, Train Loss: {}, Train Evaluation Criterion: {}"
                .format(i, training_loss, eval_crit))
Example #2
def _test_cifar_resnet():
    dev = 0
    cntk_dev = cntk_device(dev)
    epoch_size = sys.maxsize
    mbs = create_mb_source(epoch_size)
    stream_infos = mbs.stream_infos()
    for si in stream_infos:
        if si.m_name == 'features':
            features_si = si
        elif si.m_name == 'labels':
            labels_si = si

    image_shape = features_si.m_sample_layout.dimensions()
    image_shape = (image_shape[2], image_shape[0], image_shape[1])

    num_classes = labels_si.m_sample_layout.dimensions()[0]

    image_input = variable(image_shape,
                           features_si.m_element_type,
                           needs_gradient=False,
                           name="Images")
    classifer_output = resnet_classifer(image_input, num_classes, dev,
                                        "classifierOutput")
    label_var = variable((num_classes,),
                         labels_si.m_element_type,
                         needs_gradient=False,
                         name="Labels")

    ce = cross_entropy_with_softmax(classifer_output.output(), label_var)
    pe = classification_error(classifer_output.output(), label_var)
    image_classifier = combine([ce, pe, classifer_output], "ImageClassifier")

    learning_rate_per_sample = cntk_py.learning_rates_per_sample(0.0078125)
    trainer = cntk_py.Trainer(image_classifier, ce.output(), [
        cntk_py.sgdlearner(image_classifier.parameters(),
                           learning_rate_per_sample)
    ])

    mb_size = 32
    num_mbs = 100

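    # Per-stream (lower, upper) bounds on how many samples a minibatch may contain.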
    minibatch_size_limits = dict()
    minibatch_size_limits[features_si] = (0, mb_size)
    minibatch_size_limits[labels_si] = (0, mb_size)
    for i in range(0, num_mbs):
        mb = mbs.get_next_minibatch(minibatch_size_limits, cntk_dev)

        arguments = dict()
        arguments[image_input] = mb[features_si].m_data
        arguments[label_var] = mb[labels_si].m_data

        trainer.train_minibatch(arguments, cntk_dev)

        freq = 20
        if i % freq == 0:
            print("{}: {}".format(i, get_train_loss(trainer)))
Example #3
def test_simple_mnist():
    input_dim = 784
    num_output_classes = 10
    num_hidden_layers = 1
    hidden_layers_dim = 200
    epoch_size = sys.maxsize
    minibatch_size = 32
    num_samples_per_sweep = 60000
    num_sweeps_to_train_with = 3
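    # 60000 samples per sweep * 3 sweeps / 32 samples per minibatch = 5625 minibatches.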
    num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) // minibatch_size
    lr = cntk_py.learning_rates_per_sample(0.003125)
    input = variable((input_dim,), np.float32, needs_gradient=False, name="features")
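    # 0.00390625 == 1/256: scale the raw 8-bit pixel values into [0, 1).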
    scaled_input = element_times(constant((), 0.00390625), input)

    label = variable((num_output_classes,), np.float32, needs_gradient=False, name="labels")
    
    dev = cntk_py.DeviceDescriptor.cpudevice()       
    netout = fully_connected_classifier_net(scaled_input.output(), num_output_classes, hidden_layers_dim, num_hidden_layers, dev, sigmoid)  
        
    ce = cross_entropy_with_softmax(netout.output(), label)
    pe = classification_error(netout.output(), label)
    ffnet = combine([ce, pe, netout], "classifier_model")        
    
    cm = create_mb_source(input_dim, num_output_classes, epoch_size)
        
    stream_infos = cm.stream_infos()  
    
    for si in stream_infos:
        if si.m_name == 'features':
            features_si = si
        elif si.m_name == 'labels':
            labels_si = si

    minibatch_size_limits = dict()
    minibatch_size_limits[features_si] = (0, minibatch_size)
    minibatch_size_limits[labels_si] = (0, minibatch_size)
                         
    trainer = cntk_py.Trainer(ffnet, ce.output(), [cntk_py.sgdlearner(ffnet.parameters(), lr)])          
    
    for i in range(num_minibatches_to_train):
        mb = cm.get_next_minibatch(minibatch_size_limits, dev)
        
        arguments = dict()
        arguments[input] = mb[features_si].m_data
        arguments[label] = mb[labels_si].m_data
        
        trainer.train_minibatch(arguments, dev)

        freq = 20        
        if i % freq == 0: 
            training_loss = get_train_loss(trainer)                   
            print("{}: {}".format(i, training_loss))
    #TODO: move the testing code into a separate test module ?
    assert np.allclose(training_loss, 0.6142425537109375, atol=TOLERANCE_ABSOLUTE)               
Example #4
def ffnet():
    input_dim = 2
    num_output_classes = 2
    num_hidden_layers = 2
    hidden_layers_dim = 50
    epoch_size = sys.maxsize
    minibatch_size = 25
    num_samples_per_sweep = 10000
    num_sweeps_to_train_with = 2
    num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) // minibatch_size
    lr = learning_rates_per_sample(0.02)
    input = variable((input_dim,), np.float32, needs_gradient=False, name="features")
    label = variable((num_output_classes,), np.float32, needs_gradient=False, name="labels")
    dev = -1
    cntk_dev = cntk_device(dev)
    netout = fully_connected_classifier_net(input, num_output_classes, hidden_layers_dim, num_hidden_layers, dev, sigmoid)

    ce = cross_entropy_with_softmax(netout, label)
    pe = classification_error(netout, label)
    #TODO: add save and load module code
    ffnet = combine([ce, pe, netout], "classifier_model")

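    # Resolve the training data file relative to this script's directory.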
    rel_path = r"../../../../Examples/Other/Simple2d/Data/SimpleDataTrain_cntk_text.txt"
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
    cm = create_text_mb_source(path, input_dim, num_output_classes, epoch_size)

    stream_infos = cm.stream_infos()

    for si in stream_infos:
        if si.m_name == 'features':
            features_si = si
        elif si.m_name == 'labels':
            labels_si = si

    trainer = Trainer(netout, ce, pe, [sgdlearner(netout.owner.parameters(), lr)])

    for i in range(num_minibatches_to_train):
        mb = cm.get_next_minibatch(minibatch_size, cntk_dev)

        arguments = dict()
        arguments[input] = mb[features_si].m_data
        arguments[label] = mb[labels_si].m_data

        trainer.train_minibatch(arguments, cntk_dev)
        freq = 20
        if i % freq == 0:
            training_loss = get_train_loss(trainer)
            eval_crit = get_train_eval_criterion(trainer)
            print("Minibatch: {}, Train Loss: {}, Train Evaluation Criterion: {}".format(i, training_loss, eval_crit))
Example #5
def train_sequence_classifier(device):
    input_dim = 2000
    cell_dim = 25
    hidden_dim = 25
    embedding_dim = 50
    num_output_classes = 5

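    # Sparse sequence input over a 2000-dimensional vocabulary (one-hot word vectors).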
    features = variable(shape=input_dim, is_sparse=True, name="features")
    classifier_output = LSTM_sequence_classifer_net(features,
                                                    num_output_classes,
                                                    embedding_dim, hidden_dim,
                                                    cell_dim, device)

    label = variable(num_output_classes,
                     dynamic_axes=[Axis.default_batch_axis()],
                     name="labels")
    ce = cross_entropy_with_softmax(classifier_output, label)
    pe = classification_error(classifier_output, label)

    #TODO: add save and load module code
    lstm_net = combine([ce, pe, classifier_output], "classifier_model")

    rel_path = r"../../../../Tests/EndToEndTests/Text/SequenceClassification/Data/Train.ctf"
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
    cm = create_text_mb_source(path, input_dim, num_output_classes, 0, True,
                               False, "x", "y")

    stream_infos = cm.stream_infos()

    for si in stream_infos:
        if si.m_name == 'features':
            features_si = si
        elif si.m_name == 'labels':
            labels_si = si

    minibatch_size = 200
    lr = learning_rates_per_sample(0.0005)

    trainer = Trainer(classifier_output, ce, pe,
                      [sgdlearner(classifier_output.owner.parameters(), lr)])

    freq = 1
    i = 0
    cntk_dev = cntk_device(device)
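    # Loop until the minibatch source is exhausted; an empty minibatch marks the
    # end of the data.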
    while True:
        mb = cm.get_next_minibatch(minibatch_size, cntk_dev)
        if len(mb) == 0:
            break
        arguments = dict()
        arguments[features] = mb[features_si].m_data
        arguments[label] = mb[labels_si].m_data

        trainer.train_minibatch(arguments, cntk_dev)

        if i % freq == 0:
            training_loss = get_train_loss(trainer)
            eval_crit = get_train_eval_criterion(trainer)
            print(
                "Minibatch: {}, Train Loss: {}, Train Evaluation Criterion: {}"
                .format(i, training_loss, eval_crit))

        i += 1
Example #6
    minibatch_config["deserializers"] = [deserializer_config]

    return create_minibatch_source(minibatch_config)

if __name__ == '__main__':
    input_dim = 2
    num_output_classes = 2
    num_hidden_layers = 2
    hidden_layers_dim = 50
    
    minibatch_size = 25
    num_samples_per_sweep = 10000
    num_sweeps_to_train_with = 2
    num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size
    lr = 0.02
    input = variable((input_dim,), np.float32, needs_gradient=False, name="features")
    label = variable((num_output_classes,), np.float32, needs_gradient=False, name="labels")
    dev = cntk_py.DeviceDescriptor.cpudevice()     
    netout = fully_connected_classifier_net(input, num_output_classes, hidden_layers_dim, num_hidden_layers, dev, sigmoid)  
        
    ce = cross_entropy_with_softmax(netout.output(), label)
    pe = classification_error(netout.output(), label)
    ffnet = combine([ce, pe, netout], "classifier_model")      
    
    cm = create_mb_source()
        
    stream_infos = cm.stream_infos()
    
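    # Per-stream (lower, upper) sample limits, keyed by stream descriptor,
    # as in the earlier examples.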
    minibatch_size_limits = dict()
    minibatch_size_limits[stream_infos[0]] = (0, minibatch_size)
    minibatch_size_limits[stream_infos[1]] = (0, minibatch_size)