def graph_eval(input_graph_def, graph, input_node, output_node, batchsize):

    input_graph_def.ParseFromString(tf.io.gfile.GFile(graph, "rb").read())

    # CIFAR-10 dataset download and preprocessing
    # y_test labels will be one-hot encoded
    (_, _), (x_test, y_test) = datadownload()

    total_batches = int(len(x_test) / batchsize)

    tf.import_graph_def(input_graph_def, name='')

    # Get input placeholders & tensors
    images_in = tf.compat.v1.get_default_graph().get_tensor_by_name(
        input_node + ':0')

    # get output tensors
    logits = tf.compat.v1.get_default_graph().get_tensor_by_name(output_node +
                                                                 ':0')
    predicted_logit = tf.argmax(input=logits, axis=1, output_type=tf.int32)

    with tf.compat.v1.Session() as sess:

        predictions = []
        progress = ProgressBar()

        sess.run(tf.compat.v1.initializers.global_variables())

        # process all batches
        for i in progress(range(0, total_batches)):

            # make batches of images
            img_batch = x_test[i * batchsize:(i + 1) * batchsize]

            # run session to get a batch of predictions
            feed_dict = {images_in: img_batch}
            pred = sess.run([predicted_logit], feed_dict)
            predictions.append(pred)

    correct = 0
    wrong = 0

    # predictions is a list of length total_batches;
    # each entry is a list holding one array of batchsize predicted labels
    for i in range(total_batches):
        for j in range(batchsize):
            if predictions[i][0][j] == np.argmax(y_test[(i * batchsize) + j]):
                correct += 1
            else:
                wrong += 1

    # calculate accuracy
    acc = (correct / (total_batches * batchsize))

    print('Correct:', correct, 'Wrong:', wrong, 'Accuracy:',
          '{:.4f}'.format(acc))

    return
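
A minimal invocation sketch for the function above. The .pb path and node
names are assumptions for illustration; substitute the values for your own
frozen graph:

import tensorflow as tf

input_graph_def = tf.compat.v1.GraphDef()
graph_eval(input_graph_def,
           graph='./freeze/frozen_graph.pb',  # hypothetical path
           input_node='images_in',            # hypothetical input node name
           output_node='dense_1/BiasAdd',     # hypothetical output node name
           batchsize=100)

Example #2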
def graph_eval(input_graph_def, graph, input_node, output_node, batchsize):

    input_graph_def.ParseFromString(tf.io.gfile.GFile(graph, "rb").read())

    # CIFAR-10 dataset
    (_, _), (x_test, y_test) = datadownload()

    total_batches = int(len(x_test) / batchsize)

    tf.import_graph_def(input_graph_def, name='')

    # Get input placeholders & tensors
    images_in = tf.compat.v1.get_default_graph().get_tensor_by_name(
        input_node + ':0')
    labels = tf.compat.v1.placeholder(tf.int32, shape=[None, 10])

    # get output tensors
    logits = tf.compat.v1.get_default_graph().get_tensor_by_name(output_node +
                                                                 ':0')
    predicted_logit = tf.argmax(input=logits, axis=1, output_type=tf.int32)
    ground_truth_label = tf.argmax(labels, 1, output_type=tf.int32)

    # Define the metric and update operations
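    # (accuracy is a streaming metric backed by local variables total/count
    # that accumulate across batches, which is why local variables are
    # initialized below and the update op is run once per batch)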
    tf_acc, tf_acc_update = tf.compat.v1.metrics.accuracy(
        labels=ground_truth_label, predictions=predicted_logit, name='acc')

    with tf.compat.v1.Session() as sess:
        progress = ProgressBar()

        sess.run(tf.compat.v1.initializers.global_variables())
        sess.run(tf.compat.v1.initializers.local_variables())

        # process all batches
        for i in progress(range(0, total_batches)):

            # fetch a batch from validation dataset
            x_batch, y_batch = x_test[i*batchsize:i*batchsize+batchsize], \
                               y_test[i*batchsize:i*batchsize+batchsize]

            # run the update op to accumulate this batch into the metric;
            # the final accuracy is read once after the loop
            feed_dict = {images_in: x_batch, labels: y_batch}
            sess.run(tf_acc_update, feed_dict)

        acc = sess.run(tf_acc)
        print('Graph accuracy with validation dataset: {:1.4f}'.format(acc))

    return
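
The streaming behaviour of tf.compat.v1.metrics.accuracy is easiest to see in
isolation. A self-contained sketch with toy values (not taken from the model
above):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # required when running under TF 2.x

labels = tf.compat.v1.placeholder(tf.int32, shape=[None])
preds = tf.compat.v1.placeholder(tf.int32, shape=[None])
acc, acc_update = tf.compat.v1.metrics.accuracy(labels=labels,
                                                predictions=preds)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.initializers.local_variables())
    sess.run(acc_update, {labels: [0, 1, 1], preds: [0, 1, 0]})  # 2/3 correct
    sess.run(acc_update, {labels: [1, 0], preds: [1, 0]})        # 2/2 correct
    print(sess.run(acc))  # 0.8 -> 4 correct of the 5 examples seen so far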
Example #3
def graph_eval(input_graph_def, graph, input_node, output_node):

    input_graph_def.ParseFromString(tf.io.gfile.GFile(graph, "rb").read())

    # CIFAR-10 dataset
    (x_train, y_train), (x_test, y_test) = datadownload()

    tf.import_graph_def(input_graph_def, name='')

    # Get input placeholders & tensors
    images_in = tf.compat.v1.get_default_graph().get_tensor_by_name(
        input_node + ':0')
    labels = tf.compat.v1.placeholder(tf.int32,
                                      shape=[None, 10],
                                      name='labels')

    # get output tensors
    logits = tf.compat.v1.get_default_graph().get_tensor_by_name(output_node +
                                                                 ':0')
    # debug: show the recovered output tensor
    print('##################################################')
    print(logits)
    print('##################################################')

    predicted_logit = tf.compat.v1.argmax(input=logits,
                                          axis=1,
                                          output_type=tf.int32)
    # predicted_logit = tf.compat.v1.argmax(input=logits, axis=3, output_type=tf.int32)

    with tf.compat.v1.Session() as sess:

        sess.run(tf.compat.v1.initializers.global_variables())
        # Run graph to get predictions
        pred = sess.run(predicted_logit,
                        feed_dict={
                            images_in: x_test,
                            labels: y_test
                        })

    # iterate over the list of predictions and compare to ground truth
    acc = calc_acc(x_test, y_test, pred)

    print('Graph accuracy: {:1.2f}'.format(acc), '%', flush=True)

    return
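
calc_acc is defined elsewhere in this repository. A plausible sketch of it,
assuming y_test is one-hot encoded, pred holds integer class indices, and the
'%' in the print above means it returns a percentage:

import numpy as np

def calc_acc(x, y_true, y_pred):
    # count predictions whose class index matches the one-hot ground truth
    correct = np.sum(y_pred == np.argmax(y_true, axis=1))
    return 100.0 * correct / len(y_pred)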
Example #4
def train(input_height, input_width, input_chan, batchsize, learnrate,
          epochs, keras_hdf5, tboard):
    
    def step_decay(epoch):
        """
        Learning rate scheduler used by callback
        Reduces learning rate depending on number of epochs
        """
        lr = learnrate
        if epoch > 150:
            lr /= 1000
        elif epoch > 120:
            lr /= 100
        elif epoch > 80:
            lr /= 10
        elif epoch > 20:
            lr /= 2
        return lr
    

    # CIFAR-10 dataset has 60k images: 50k for training, 10k for test.
    # Each image is 32x32 pixels with 3 color channels (8 bits per channel)
    (x_train, y_train), (x_test, y_test) = datadownload()
    print('Dataset downloaded and pre-processed')

    model = densenetx(input_shape=(input_height, input_width, input_chan),
                      classes=10, theta=0.5, drop_rate=0.2, k=12,
                      convlayers=[16, 16, 16])


    # prints a layer-by-layer summary of the network
    print('\n'+DIVIDER)
    print(' Model Summary')
    print(DIVIDER)
    # model.summary() prints the table itself; wrapping it in print() would
    # emit a spurious 'None'
    model.summary()
    print("Model Inputs: {ips}".format(ips=(model.inputs)))
    print("Model Outputs: {ops}".format(ops=(model.outputs)))
    

    '''
    -----------------------------------------------
    CALLBACKS
    -----------------------------------------------
    '''

    chkpt_call = ModelCheckpoint(filepath=keras_hdf5,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True)

    tb_call = TensorBoard(log_dir=tboard,
                          batch_size=batchsize,
                          update_freq='epoch')


    lr_scheduler_call = LearningRateScheduler(schedule=step_decay,
                                              verbose=1)

    lr_plateau_call = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                        cooldown=0,
                                        patience=5,
                                        min_lr=0.5e-6)

    callbacks_list = [tb_call, lr_scheduler_call, lr_plateau_call, chkpt_call]
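    # note: LearningRateScheduler re-applies step_decay at the start of every
    # epoch, so any reduction made by ReduceLROnPlateau is overridden by the
    # schedule on the following epoch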

    '''
    -----------------------------------------------
    TRAINING
    -----------------------------------------------
    '''

    '''
    Input image pipeline for training

     data augmentation for training
       - random rotation
       - random horizontal flip
       - random height & width shifts
       - random shear and zoom
    '''
    data_augment = ImageDataGenerator(rotation_range=10,
                                      horizontal_flip=True,
                                      height_shift_range=0.1,
                                      width_shift_range=0.1,
                                      shear_range=0.1,
                                      zoom_range=0.1)

    train_generator = data_augment.flow(x=x_train,
                                        y=y_train,
                                        batch_size=batchsize,
                                        shuffle=True)
                                  
    '''
    Optimizer
    RMSprop used in this example.
    SGD with Nesterov momentum was used in the original paper.
    '''
    # opt = SGD(lr=learnrate, momentum=0.9, nesterov=True)
    opt = RMSprop(lr=learnrate)
    
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])


    # calculate number of steps in one training epoch
    train_steps = train_generator.n//train_generator.batch_size

    # run training
    model.fit_generator(generator=train_generator,
                        epochs=epochs,
                        steps_per_epoch=train_steps,
                        validation_data=(x_test, y_test),
                        callbacks=callbacks_list,
                        verbose=1)


    print("\nTensorBoard can be opened with the command: tensorboard --logdir={dir} --host localhost --port 6006".format(dir=tboard))

    print('\n'+DIVIDER)
    print(' Evaluate model accuracy with validation set..')
    print(DIVIDER)

    '''
    -----------------------------------------------
    EVALUATION
    -----------------------------------------------
    '''

    scores = model.evaluate(x=x_test, y=y_test, batch_size=50, verbose=0)
    print('Evaluation Loss    : ', scores[0])
    print('Evaluation Accuracy: ', scores[1])


    '''
    -----------------------------------------------
    PREDICTIONS
    -----------------------------------------------
    '''

    # make predictions
    predictions = model.predict(x_test,
                                batch_size=batchsize,
                                verbose=1)

    # check accuracy
    correct = 0
    wrong = 0
    for i in range(len(predictions)):
        pred = np.argmax(predictions[i])
        if pred == np.argmax(y_test[i]):
            correct += 1
        else:
            wrong += 1

    print('Correct predictions:', correct, ' Wrong predictions:', wrong,
          ' Accuracy:', (correct / len(predictions)))



    return
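
A hypothetical invocation of train(). CIFAR-10 images are 32x32 with 3
channels; the remaining argument values are illustrative, not taken from the
original script:

train(input_height=32,
      input_width=32,
      input_chan=3,
      batchsize=125,                        # illustrative batch size
      learnrate=0.001,                      # illustrative base learning rate
      epochs=200,
      keras_hdf5='./model/k_model.hdf5',    # hypothetical checkpoint path
      tboard='./tb_logs')                   # hypothetical TensorBoard log dir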