Example #1
def TSS(y_true, y_pred):
    """
    TSS

    import tensorflow as tf
    sess = tf.Session()
    a=tf.contrib.metrics.confusion_matrix([1, 0, 0, 0, 0], [1, 0, 1, 0, 0])
    a.eval(session=sess)
    array([[3, 1],
          [0, 1]], dtype=int32)
    a[0][0].eval(session=sess)
    3 -> tn
    a[0][1].eval(session=sess)
    1 -> fp
    """
    confusion_matrix = tf.confusion_matrix(labels=tf.argmax(y_true, 1),
                                           predictions=tf.argmax(y_pred, 1),
                                           num_classes=2,
                                           dtype=tf.float32)
    tp = confusion_matrix[1][1]
    fn = confusion_matrix[1][0]
    fp = confusion_matrix[0][1]
    tn = confusion_matrix[0][0]
    sensitivity = tf.divide(tp, tf.add(tp, fn))  # true positive rate
    false_alarm = tf.divide(fp, tf.add(fp, tn))  # false positive rate
    tss = tf.subtract(sensitivity, false_alarm)
    return tss
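A minimal usage sketch (TF 1.x graph mode; the one-hot tensors below are
hypothetical):

import tensorflow as tf

y_true = tf.constant([[1., 0.], [0., 1.], [1., 0.]])        # one-hot labels
y_pred = tf.constant([[0.9, 0.1], [0.2, 0.8], [0.4, 0.6]])  # predicted scores
with tf.Session() as sess:
    print(sess.run(TSS(y_true, y_pred)))  # 1.0 - 0.5 = 0.5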
Example #2
def setup_training_and_eval_graphs(x, cluster_label, y, n_y, curl_model,
                                   classify_with_samples, is_training, name):
    """Set up the graph and return ops for training or evaluation.

    Args:
      x: tf placeholder for image.
      cluster_label: tf placeholder for ground truth cluster_label.
      y: tf placeholder for some self-supervised cluster_label/prediction.
      n_y: int, dimensionality of discrete latent variable y.
      curl_model: snt.AbstractModule representing the CURL model.
      classify_with_samples: bool, whether to *sample* latents for classification.
      is_training: bool, whether this graph is the training graph.
      name: str, graph name.

    Returns:
      A namedtuple with the required graph ops to perform training or evaluation.

    """
    # kl_y_supervised is -log q(y=y_true | x)
    (log_p_x, kl_y, kl_z, log_p_x_supervised, kl_y_supervised,
     kl_z_supervised) = curl_model.log_prob_elbo_components(x, y)

    ll = log_p_x - kl_y - kl_z
    elbo = -tf.reduce_mean(ll)

    # Supervised loss, either for SMGR, or adaptation to supervised benchmark.
    ll_supervised = log_p_x_supervised - kl_y_supervised - kl_z_supervised
    elbo_supervised = -tf.reduce_mean(ll_supervised)

    # Summaries
    kl_y = tf.reduce_mean(kl_y)
    kl_z = tf.reduce_mean(kl_z)
    log_p_x_supervised = tf.reduce_mean(log_p_x_supervised)
    kl_y_supervised = tf.reduce_mean(kl_y_supervised)
    kl_z_supervised = tf.reduce_mean(kl_z_supervised)

    # Evaluation.
    hiddens = curl_model.get_shared_rep(x, is_training=is_training)
    cat = curl_model.infer_cluster(hiddens)
    cat_probs = cat.probs

    # Not really a confusion matrix, more like a Class/Cluster relation matrix
    confusion = tf.confusion_matrix(cluster_label,
                                    tf.argmax(cat_probs, axis=1),
                                    num_classes=n_y,
                                    name=name + '_confusion')
    purity = (tf.reduce_sum(tf.reduce_max(confusion, axis=0)) /
              tf.reduce_sum(confusion))

    if classify_with_samples:
        latents = curl_model.infer_latent(hiddens=hiddens,
                                          y=tf.to_float(
                                              cat.sample())).sample()
    else:
        latents = curl_model.infer_latent(hiddens=hiddens,
                                          y=tf.to_float(cat.mode())).mean()

    return MainOps(elbo, ll, log_p_x, kl_y, kl_z, elbo_supervised,
                   ll_supervised, log_p_x_supervised, kl_y_supervised,
                   kl_z_supervised, cat_probs, confusion, purity, latents)
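For reference, the purity above is the usual cluster-purity measure: each
predicted cluster (column) is credited with its most common true class. A
small NumPy illustration with hypothetical counts:

import numpy as np

# rows = true classes, columns = predicted clusters
confusion = np.array([[5, 1],
                      [2, 4]])
purity = confusion.max(axis=0).sum() / confusion.sum()  # (5 + 4) / 12 = 0.75
print(purity)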
Example #3
def get_aggregated_conf_matrix(aggregateIndexes, testLabels, predictedLabels):
    testLabelsAggregated = np.digitize(testLabels, aggregateIndexes) - 1
    predictedLabelsAggregated = np.digitize(predictedLabels,
                                            aggregateIndexes) - 1
    # .eval() here assumes an active default TensorFlow session
    confMatrixAggregated = tf.confusion_matrix(
        testLabelsAggregated, predictedLabelsAggregated).eval()
    np.set_printoptions(precision=2)
    print(confMatrixAggregated)

    return confMatrixAggregated
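A quick look at how the np.digitize step maps fine-grained labels into
broader bins, on hypothetical values:

import numpy as np

aggregateIndexes = np.array([0, 3, 6])  # bin edges
fineLabels = np.array([0, 2, 3, 5])
print(np.digitize(fineLabels, aggregateIndexes) - 1)  # [0 0 1 1]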
Example #4
def calc_model_metrics(modelFilename,
                       testLabels,
                       testImages,
                       testTypeNames,
                       typeNamesList,
                       snTypes=None,
                       fig_dir='.'):
    tf.reset_default_graph()
    nw = len(testImages[0])
    nBins = len(typeNamesList)
    imWidthReduc = 8
    imWidth = 32  # Image size and width

    x, y_, keep_prob, y_conv, W, b = convnet_variables(imWidth, imWidthReduc,
                                                       nw, nBins)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        saver.restore(sess, modelFilename)

        yy = y_conv.eval(feed_dict={x: testImages, keep_prob: 1.0})

        # CONFUSION MATRIX
        predictedLabels = np.argmax(yy, axis=1)
        confMatrix = tf.confusion_matrix(testLabels, predictedLabels).eval()

        # Aggregate age conf matrix
        aggregateAgesIndexes = np.arange(0, nBins + 1,
                                         int(nBins / len(snTypes)))
        confMatrixAggregateAges = get_aggregated_conf_matrix(
            aggregateAgesIndexes, testLabels, predictedLabels)
        classnames = np.copy(snTypes)
        if confMatrixAggregateAges.shape[0] < len(classnames):
            classnames = classnames[:-1]
        plot_confusion_matrix(confMatrixAggregateAges,
                              classes=classnames,
                              normalize=True,
                              title='',
                              fig_dir=fig_dir,
                              name='aggregate_ages',
                              fontsize_labels=23,
                              fontsize_matrix=21)

        # Aggregate age and subtypes conf matrix
        aggregateSubtypesIndexes = np.array([0, 108, 180, 234, 306])
        broadTypes = ['Ia', 'Ib', 'Ic', 'II']
        confMatrixAggregateSubtypes = get_aggregated_conf_matrix(
            aggregateSubtypesIndexes, testLabels, predictedLabels)
        plot_confusion_matrix(confMatrixAggregateSubtypes,
                              classes=broadTypes,
                              normalize=True,
                              title='',
                              fig_dir=fig_dir,
                              name='aggregate_subtypes',
                              fontsize_labels=35,
                              fontsize_matrix=35)
        # plt.show()

    np.set_printoptions(precision=2)
    print(confMatrix)
    plot_confusion_matrix(confMatrix,
                          classes=typeNamesList,
                          normalize=True,
                          title='',
                          fig_dir=fig_dir,
                          name='all',
                          fontsize_labels=2,
                          fontsize_matrix=1)

    # ACTUAL ACCURACY, broadTYPE ACCURACY, AGE ACCURACY
    typeAndAgeCorrect = 0
    typeCorrect = 0
    broadTypeCorrect = 0
    broadTypeAndAgeCorrect = 0
    typeAndNearAgeCorrect = 0
    broadTypeAndNearAgeCorrect = 0
    for i in range(len(testTypeNames)):
        predictedIndex = np.argmax(yy[i])

        classification = testTypeNames[i].split(': ')
        if len(classification) == 2:
            testType, testAge = classification
        else:
            testGalType, testType, testAge = classification
        actual = typeNamesList[predictedIndex].split(': ')
        if len(actual) == 2:
            actualType, actualAge = actual
        else:
            actualGalType, actualType, actualAge = actual

        testBroadType = testType[0:2]
        actualBroadType = actualType[0:2]
        if testType[0:3] == 'IIb':
            testBroadType = 'Ib'
        if actualType[0:3] == 'IIb':
            actualBroadType = 'Ib'
        nearTestAge = testAge.split(' to ')

        if testTypeNames[i] == typeNamesList[predictedIndex]:
            typeAndAgeCorrect += 1
        if testType == actualType:  # correct type
            typeCorrect += 1
            if (nearTestAge[0] in actualAge) or (
                    nearTestAge[1] in actualAge
            ):  # check if the age is in the neighbouring bin
                typeAndNearAgeCorrect += 1  # all correct except nearby bin
        if testBroadType == actualBroadType:  # correct broadtype
            broadTypeCorrect += 1
            if testAge == actualAge:
                broadTypeAndAgeCorrect += 1
            if (nearTestAge[0] in actualAge) or (
                    nearTestAge[1] in actualAge
            ):  # check if the age is in the neighbouring bin
                broadTypeAndNearAgeCorrect += 1  # Broadtype and nearby bin

    typeAndAgeAccuracy = float(typeAndAgeCorrect) / len(testTypeNames)
    typeAccuracy = float(typeCorrect) / len(testTypeNames)
    broadTypeAccuracy = float(broadTypeCorrect) / len(testTypeNames)
    broadTypeAndAgeAccuracy = float(broadTypeAndAgeCorrect) / len(
        testTypeNames)
    typeAndNearAgeAccuracy = float(typeAndNearAgeCorrect) / len(testTypeNames)
    broadTypeAndNearAgeAccuracy = float(broadTypeAndNearAgeCorrect) / len(
        testTypeNames)

    print("typeAndAgeAccuracy : " + str(typeAndAgeAccuracy))
    print("typeAccuracy : " + str(typeAccuracy))
    print("broadTypeAccuracy : " + str(broadTypeAccuracy))
    print("broadTypeAndAgeAccuracy: " + str(broadTypeAndAgeAccuracy))
    print("typeAndNearAgeAccuracy : " + str(typeAndNearAgeAccuracy))
    print("broadTypeAndNearAgeAccuracy : " + str(broadTypeAndNearAgeAccuracy))
Example #5
    saver.restore(sess, tf.train.latest_checkpoint(MODELS))

    graph = tf.get_default_graph()

    x = graph.get_tensor_by_name("x:0")
    y = graph.get_tensor_by_name("y:0")
    softmax = graph.get_tensor_by_name("softmax:0")
    accuracy = graph.get_tensor_by_name("accuracy:0")
    feed_dict = {x: IMAGES, y: LABELS}

    pred = sess.run([softmax, accuracy], feed_dict=feed_dict)
    with open(os.path.join(dirname, '../metrics/eval.json'), 'w') as outfile:
        json.dump({"accuracy": str(pred[1])}, outfile)

    tf_confusion_matrix = tf.confusion_matrix(labels=tf.argmax(LABELS, 1),
                                              predictions=tf.argmax(
                                                  pred[0], 1),
                                              num_classes=10)
    tf_confusion_matrix = tf_confusion_matrix.eval()

    confusion_matrix = []
    for idx, row in enumerate(tf_confusion_matrix):
        for idy, count in enumerate(row):
            confusion_matrix.append({
                # rows of tf.confusion_matrix are true labels,
                # columns are predictions
                "label": "Class " + str(idx),
                "prediction": "Class " + str(idy),
                "count": str(count)
            })

    with open(os.path.join(dirname, '../metrics/confusion_matrix.json'),
              'w') as outfile:
        json.dump(confusion_matrix, outfile)
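For a hypothetical 2x2 matrix [[3, 1], [0, 1]], the JSON written above would
contain one entry per cell:

[
  {"label": "Class 0", "prediction": "Class 0", "count": "3"},
  {"label": "Class 0", "prediction": "Class 1", "count": "1"},
  {"label": "Class 1", "prediction": "Class 0", "count": "0"},
  {"label": "Class 1", "prediction": "Class 1", "count": "1"}
]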
Example #6
def main(_):
    # We want to see all the logging messages for this tutorial.
    tf.logging.set_verbosity(tf.logging.INFO)
    np.set_printoptions(threshold=np.inf, linewidth=10000)

    flags = vars(FLAGS)
    for key in sorted(flags.keys()):
        tf.logging.info('%s = %s', key, flags[key])

    if FLAGS.random_seed_weights != -1:
        tf.random.set_random_seed(FLAGS.random_seed_weights)

    # Start a new TensorFlow session.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    #config.log_device_placement = False
    sess = tf.InteractiveSession(config=config)

    # Begin by making sure we have the training data we need. If you already have
    # training data of your own, use `--data_url= ` on the command line to avoid
    # downloading.

    label_count = len(
        input_data.prepare_words_list(FLAGS.wanted_words.split(','),
                                      FLAGS.silence_percentage,
                                      FLAGS.unknown_percentage))

    model_settings = models.prepare_model_settings(
        label_count, FLAGS.sample_rate, FLAGS.nchannels,
        FLAGS.clip_duration_ms, FLAGS.representation, FLAGS.window_size_ms,
        FLAGS.window_stride_ms, 1, FLAGS.dct_coefficient_count,
        FLAGS.filterbank_channel_count,
        [int(x) for x in FLAGS.filter_counts.split(',')],
        [int(x)
         for x in FLAGS.filter_sizes.split(',')], FLAGS.final_filter_len,
        FLAGS.dropout_prob, FLAGS.batch_size, FLAGS.dilate_after_layer,
        FLAGS.stride_after_layer, FLAGS.connection_type)

    fingerprint_size = model_settings['fingerprint_size']
    time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)
    # Figure out the learning rates for each training phase. Since it's often
    # effective to have high learning rates at the start of training, followed by
    # lower levels towards the end, the number of steps and learning rates can be
    # specified as comma-separated lists to define the rate at each stage. For
    # example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001
    # will run 13,000 training loops in total, with a rate of 0.001 for the first
    # 10,000, and 0.0001 for the final 3,000.
    training_steps_list = list(
        map(int, FLAGS.how_many_training_steps.split(',')))
    learning_rates_list = list(map(float, FLAGS.learning_rate.split(',')))
    if len(training_steps_list) != len(learning_rates_list):
        raise Exception(
            '--how_many_training_steps and --learning_rate must be equal length '
            'lists, but are %d and %d long instead' %
            (len(training_steps_list), len(learning_rates_list)))

    actual_batch_size = tf.placeholder(tf.int32, [1])

    fingerprint_input = tf.placeholder(tf.float32, [None, fingerprint_size],
                                       name='fingerprint_input')

    hidden, logits, dropout_prob = models.create_model(
        fingerprint_input,
        model_settings,
        FLAGS.model_architecture,
        is_training=True)

    # Define loss and optimizer
    ground_truth_input = tf.placeholder(tf.int64, [None],
                                        name='groundtruth_input')

    # Optionally we can add runtime checks to spot when NaNs or other symptoms of
    # numerical errors start occurring during training.
    control_dependencies = []
    if FLAGS.check_nans:
        checks = tf.add_check_numerics_ops()
        control_dependencies = [checks]

    # Create the back propagation and training evaluation machinery in the graph.
    with tf.name_scope('cross_entropy'):
        cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy(
            labels=tf.slice(ground_truth_input, [0], actual_batch_size),
            logits=tf.slice(logits, [0, 0],
                            tf.concat([actual_batch_size, [-1]], 0)))
    tf.summary.scalar('cross_entropy', cross_entropy_mean)
    with tf.name_scope('train'), tf.control_dependencies(control_dependencies):
        learning_rate_input = tf.placeholder(tf.float32, [],
                                             name='learning_rate_input')
        if FLAGS.optimizer == 'sgd':
            train_step = tf.train.GradientDescentOptimizer(
                learning_rate_input).minimize(cross_entropy_mean)
        elif FLAGS.optimizer == 'adam':
            train_step = tf.train.AdamOptimizer(learning_rate_input).minimize(
                cross_entropy_mean)
        elif FLAGS.optimizer == 'adagrad':
            train_step = tf.train.AdagradOptimizer(
                learning_rate_input).minimize(cross_entropy_mean)
        elif FLAGS.optimizer == 'rmsprop':
            train_step = tf.train.RMSPropOptimizer(
                learning_rate_input).minimize(cross_entropy_mean)
        else:
            raise ValueError('unknown optimizer: %s' % FLAGS.optimizer)
    predicted_indices = tf.argmax(logits, 1)
    correct_prediction = tf.equal(predicted_indices, ground_truth_input)
    confusion_matrix = tf.confusion_matrix(tf.slice(ground_truth_input, [0],
                                                    actual_batch_size),
                                           tf.slice(predicted_indices, [0],
                                                    actual_batch_size),
                                           num_classes=label_count)
    evaluation_step = tf.reduce_mean(
        tf.cast(tf.slice(correct_prediction, [0], actual_batch_size),
                tf.float32))
    tf.summary.scalar('accuracy', evaluation_step)

    global_step = tf.train.get_or_create_global_step()
    increment_global_step = tf.assign(global_step, global_step + 1)

    saver = tf.train.Saver(tf.global_variables(), max_to_keep=0)

    # Merge all the summaries and write them out to /tmp/retrain_logs (by default)
    merged_summaries = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                         sess.graph)
    validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir +
                                              '/validation')

    tf.global_variables_initializer().run()

    start_step = 1

    if FLAGS.start_checkpoint:
        models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
        start_step = 1 + global_step.eval(session=sess)

    t0 = dt.datetime.now()
    tf.logging.info('Training from time %s, step: %d ', t0.isoformat(),
                    start_step)

    # Save graph.pbtxt.
    tf.train.write_graph(sess.graph_def, FLAGS.train_dir,
                         FLAGS.model_architecture + '.pbtxt')

    # Save list of words.
    if FLAGS.start_checkpoint == '':
        with gfile.GFile(os.path.join(FLAGS.train_dir, \
                                      FLAGS.model_architecture + '_labels.txt'), 'w') as f:
            f.write(FLAGS.wanted_words.replace(',', '\n'))

    # log complexity of model
    total_parameters = 0
    for variable in tf.trainable_variables():
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= int(dim)
        total_parameters += variable_parameters
    tf.logging.info('number of trainable parameters: %d', total_parameters)

    checkpoint_path = os.path.join(FLAGS.train_dir,
                                   FLAGS.model_architecture + '.ckpt')
    if FLAGS.start_checkpoint == '':
        tf.logging.info('Saving to "%s-%d"', checkpoint_path, 0)
        saver.save(sess, checkpoint_path, global_step=0)

    audio_processor = input_data.AudioProcessor(
        FLAGS.data_url, FLAGS.data_dir, FLAGS.silence_percentage,
        FLAGS.unknown_percentage, FLAGS.wanted_words.split(','),
        FLAGS.labels_touse.split(','),
        FLAGS.validation_percentage, FLAGS.validation_offset_percentage,
        FLAGS.validation_files.split(','), FLAGS.testing_percentage,
        FLAGS.testing_files.split(','), FLAGS.subsample_skip,
        FLAGS.subsample_word, FLAGS.partition_word, FLAGS.partition_n,
        FLAGS.partition_training_files.split(','),
        FLAGS.partition_validation_files.split(','), FLAGS.random_seed_batch,
        FLAGS.testing_equalize_ratio, FLAGS.testing_max_samples,
        model_settings)

    # exit if how_many_training_steps==0
    if FLAGS.how_many_training_steps == '0':
        # pre-process a batch of data to make sure settings are valid
        train_fingerprints, train_ground_truth, _ = audio_processor.get_data(
            FLAGS.batch_size, 0, model_settings, FLAGS.background_frequency,
            FLAGS.background_volume, time_shift_samples,
            FLAGS.time_shift_random, 'training', sess)
        sess.run(
            [evaluation_step],
            feed_dict={
                fingerprint_input: train_fingerprints,
                ground_truth_input: train_ground_truth,
                learning_rate_input: learning_rates_list[0],
                actual_batch_size: [FLAGS.batch_size],
                dropout_prob: model_settings['dropout_prob']
            })
        return

    training_set_size = audio_processor.set_size('training')
    testing_set_size = audio_processor.set_size('testing')
    validation_set_size = audio_processor.set_size('validation')

    # Training loop.
    training_steps_max = np.sum(training_steps_list)
    for training_step in xrange(start_step, training_steps_max + 1):
        if training_set_size > 0 and FLAGS.save_step_interval > 0:
            # Figure out what the current learning rate is.
            training_steps_sum = 0
            for i in range(len(training_steps_list)):
                training_steps_sum += training_steps_list[i]
                if training_step <= training_steps_sum:
                    learning_rate_value = learning_rates_list[i]
                    break
            # Pull the audio samples we'll use for training.
            train_fingerprints, train_ground_truth, _ = audio_processor.get_data(
                FLAGS.batch_size, 0, model_settings,
                FLAGS.background_frequency, FLAGS.background_volume,
                time_shift_samples, FLAGS.time_shift_random, 'training', sess)
            # Run the graph with this batch of training data.
            train_summary, train_accuracy, cross_entropy_value, _, _ = sess.run(
                [
                    merged_summaries, evaluation_step, cross_entropy_mean,
                    train_step, increment_global_step
                ],
                feed_dict={
                    fingerprint_input: train_fingerprints,
                    ground_truth_input: train_ground_truth,
                    learning_rate_input: learning_rate_value,
                    actual_batch_size: [FLAGS.batch_size],
                    dropout_prob: model_settings['dropout_prob']
                })
            train_writer.add_summary(train_summary, training_step)
            t1 = dt.datetime.now() - t0
            tf.logging.info(
                'Elapsed %f, Step #%d: rate %f, accuracy %.1f%%, cross entropy %f'
                % (t1.total_seconds(), training_step, learning_rate_value,
                   train_accuracy * 100, cross_entropy_value))

            # Save the model checkpoint periodically.
            if (training_step % FLAGS.save_step_interval == 0
                    or training_step == training_steps_max):
                tf.logging.info('Saving to "%s-%d"', checkpoint_path,
                                training_step)
                saver.save(sess, checkpoint_path, global_step=training_step)

        is_last_step = (training_step == training_steps_max)
        if validation_set_size > 0 and (is_last_step or
                                        (training_step %
                                         FLAGS.eval_step_interval) == 0):
            validate_and_test('validation', validation_set_size, model_settings, \
                              time_shift_samples, sess, merged_summaries, evaluation_step, \
                              confusion_matrix, logits, hidden, validation_writer, \
                              audio_processor, is_last_step, fingerprint_input, \
                              ground_truth_input, actual_batch_size, dropout_prob, \
                              training_step, t0)
    if testing_set_size > 0:
        validate_and_test('testing', testing_set_size, model_settings, time_shift_samples, \
                          sess, merged_summaries, evaluation_step, confusion_matrix, \
                          logits, hidden, validation_writer, audio_processor, \
                          True, fingerprint_input, ground_truth_input, \
                          actual_batch_size, dropout_prob, training_steps_max, t0)
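The staged learning-rate lookup inside the training loop reduces to a simple
scan over the two comma-separated flag lists; a standalone sketch (the
function name is ours, not part of the original script):

def learning_rate_for_step(step, steps_list, rates_list):
    """Return the rate for 1-indexed `step` under a staged schedule."""
    steps_sum = 0
    for n, rate in zip(steps_list, rates_list):
        steps_sum += n
        if step <= steps_sum:
            return rate
    return rates_list[-1]

# --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001
assert learning_rate_for_step(10000, [10000, 3000], [0.001, 0.0001]) == 0.001
assert learning_rate_for_step(10001, [10000, 3000], [0.001, 0.0001]) == 0.0001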
Example #7
def main():
    raw_data = pandas.read_csv('dataset/voice.csv')

    x_o_train, y_o_train, x_o_test, y_o_test = get_shuffled_divided_data(
        raw_data)

    # initialize input and output vectors
    X = tf.placeholder(tf.float32, [None, constants['features']])
    Y = tf.placeholder(tf.float32, [None, constants['labels']])

    # initialize weights and biases randomly

    ###################################################################################################################
    W1 = tf.Variable(0.001 * np.random.randn(
        constants['features'], constants['hidden1_size']).astype(np.float32))
    B1 = tf.Variable(
        0.001 * np.random.randn(constants['hidden1_size']).astype(np.float32))
    Z1 = tf.nn.relu(tf.matmul(X, W1) + B1)
    W2 = tf.Variable(
        0.001 * np.random.randn(constants['hidden1_size'],
                                constants['hidden2_size']).astype(np.float32))
    B2 = tf.Variable(
        0.001 * np.random.randn(constants['hidden2_size']).astype(np.float32))
    Z2 = tf.nn.relu(tf.matmul(Z1, W2) + B2)
    W3 = tf.Variable(0.001 * np.random.randn(
        constants['hidden2_size'], constants['labels']).astype(np.float32))
    B3 = tf.Variable(0.001 *
                     np.random.randn(constants['labels']).astype(np.float32))
    ###################################################################################################################

    hyp = tf.nn.softmax(tf.add(tf.matmul(Z2, W3), B3))

    cost = tf.reduce_mean(
        -tf.reduce_sum(Y * tf.log(tf.clip_by_value(hyp, 1e-10, 1.0))))

    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=constants['alpha']).minimize(cost)

    init = tf.global_variables_initializer()

    # calculate bin size for training
    constants['bins'] = int(x_o_train.shape[0] / constants['bin_size'])

    training_start_time = dt.now()

    # start tensor session
    with tf.Session() as sess:
        y_o_train = sess.run(tf.one_hot(y_o_train, constants['labels']))
        y_o_test = sess.run(tf.one_hot(y_o_test, constants['labels']))

        sess.run(init)

        cost_hist = []

        # model training (optimizer)
        for epoch in range(constants['epochs']):
            for bi in range(constants['bins']):

                # advance through the training set one bin per iteration
                start_point = bi * constants['bin_size']
                end_point = start_point + constants['bin_size']

                x = x_o_train[start_point:end_point]
                y = y_o_train[start_point:end_point]

                sess.run(optimizer, feed_dict={X: x, Y: y})
                c = sess.run(cost, feed_dict={X: x, Y: y})

            if (epoch % 500 == 0
                    and epoch != 0) or (epoch == constants['epochs'] - 1):
                cost_hist.append(c)
                print('\rEpoch: {} Cost: {}'.format(str(epoch), str(c)))

        training_end_time = dt.now()

        # model testing
        correct_prediction = tf.equal(tf.argmax(hyp, 1), tf.argmax(Y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        acc = accuracy.eval({X: x_o_test, Y: y_o_test}) * 100

        # ready the stat output
        first_cost = cost_hist[0]
        last_cost = cost_hist[-1]
        avg_cost = sum(cost_hist) / len(cost_hist)
        cost_hist.sort()
        lowest_cost = cost_hist[0]
        highest_cost = cost_hist[-1]
        training_time = training_end_time - training_start_time

        print('Finished Running\n')

        print('Running Details:\n'
              '\tNumber of Epochs Set To : {}\n'.format(constants['epochs']) +
              '\tNumber of Bins Set To : {}\n'.format(constants['bins'] + 1) +
              '\tSize of Bin Per Epoch : {}\n'.format(constants['bin_size']) +
              '\tTotal Training Cycles : {}\n'.format(constants['epochs'] *
                                                      (constants['bins']) +
                                                      1) +
              '\tLearning Rate Set To : {}\n'.format(constants['alpha']) +
              '\tTotal Training Time : {}\n'.format(str(training_time)))

        print('Costs:\n'
              '\tFirst Recorded Cost : {}\n'.format(first_cost) +
              '\tLast Recorded Cost : {}\n'.format(last_cost) +
              '\tAverage Cost : {}\n'.format(avg_cost) +
              '\tLowest Recorded Cost : {}\n'.format(lowest_cost) +
              '\tHighest Recorded Cost : {}\n'.format(highest_cost))

        print('Accuracy:\n' '\tFinal Accuracy: {} %\n'.format(acc))

        print('Confusion Matrix:')

        # confusion matrix
        conf_mat = tf.confusion_matrix(labels=tf.argmax(Y, 1),
                                       predictions=tf.argmax(hyp, 1),
                                       num_classes=2)
        conf_mat_to_print = sess.run(conf_mat,
                                     feed_dict={
                                         X: x_o_test,
                                         Y: y_o_test
                                     })
        print(conf_mat_to_print)
Example #8
def main():
    raw_data = pandas.read_csv('dataset/voice.csv')

    x_o_train, y_o_train, x_o_test, y_o_test = get_shuffled_divided_data(
        raw_data)

    # initialize input and output vectors
    X = tf.placeholder(tf.float32, [None, constants['features']])
    Y = tf.placeholder(tf.float32, [None, constants['labels']])

    # initialize weights and biases randomly
    W = tf.Variable(0.001 * np.random.randn(
        constants['features'], constants['labels']).astype(np.float32))
    b = tf.Variable(0.001 *
                    np.random.randn(constants['labels']).astype(np.float32))

    hyp = tf.nn.softmax(tf.add(tf.matmul(X, W), b))

    cost = tf.reduce_mean(
        -tf.reduce_sum(Y * tf.log(tf.clip_by_value(hyp, 1e-10, 1.0))))

    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=constants['alpha']).minimize(cost)

    init = tf.global_variables_initializer()

    # calculate bin size for training
    constants['bins'] = int(x_o_train.shape[0] / constants['bin_size'])

    training_start_time = dt.now()

    # start tensor session
    with tf.Session() as sess:
        y_o_train = sess.run(tf.one_hot(y_o_train, constants['labels']))
        y_o_test = sess.run(tf.one_hot(y_o_test, constants['labels']))

        sess.run(init)

        cost_hist = []

        # model training (optimizer)
        for epoch in range(constants['epochs']):
            for bi in range(constants['bins']):

                # advance through the training set one bin per iteration
                start_point = bi * constants['bin_size']
                end_point = start_point + constants['bin_size']

                x = x_o_train[start_point:end_point]
                y = y_o_train[start_point:end_point]

                sess.run(optimizer, feed_dict={X: x, Y: y})
                c = sess.run(cost, feed_dict={X: x, Y: y})

            if (epoch % 500 == 0
                    and epoch != 0) or (epoch == constants['epochs'] - 1):
                cost_hist.append(c)
                print('\rEpoch: {} Cost: {}'.format(str(epoch), str(c)))
                # print('\rW: {}, b: {}'.format(W.eval(sess), b.eval(sess)))

        training_end_time = dt.now()

        # model testing
        correct_prediction = tf.equal(tf.argmax(hyp, 1), tf.argmax(Y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        acc = accuracy.eval({X: x_o_test, Y: y_o_test}) * 100

        print_stats(acc, cost_hist, training_end_time, training_start_time)

        print('Confusion Matrix:')

        # confusion matrix
        conf_mat = tf.confusion_matrix(labels=tf.argmax(Y, 1),
                                       predictions=tf.argmax(hyp, 1),
                                       num_classes=2)
        conf_mat_to_print = sess.run(conf_mat,
                                     feed_dict={
                                         X: x_o_test,
                                         Y: y_o_test
                                     })
        print(conf_mat_to_print)