Example #1
def training():
    (X, Y), (X_test, Y_test) = cifar10.load_data()
    Y = cifar10.to_categorical(Y, 10)
    Y_test = cifar10.to_categorical(Y_test, 10)
    data_set = cifar10.read_data_sets(X, Y, X_test, Y_test)
    # mnist = input_data.read_data_sets("tmp/mnist", one_hot=True)
    # batch_x, batch_y = data_set.train.next_batch(96)

    x_placeholder = tf.placeholder("float", [None, 32 * 32 * 3])
    y_placeholder = tf.placeholder("float", [None, 10])

    logits = cifar10.inference(x_placeholder)
    loss = cifar10.loss(logits, y_placeholder)
    train_op = cifar10.train_op(loss=loss, learning_rate=0.001)
    accuracy = cifar10.accuracy(logits, y_placeholder)
    init = tf.initialize_all_variables()

    with tf.Session() as sess:
        sess.run(init)
        for step in range(MAX_STEPS):
            # print('step = {:d}'.format(step + 1))
            batch_x, batch_y = data_set.train.next_batch(96)
            # print(batch_x.shape)
            # print(batch_y.shape)
            _, Loss, acc = sess.run([train_op, loss, accuracy],
                                    feed_dict={
                                        x_placeholder: batch_x,
                                        y_placeholder: batch_y
                                    })
            if (step + 1) % 100 == 0:
                print("step: {:d} loss: {:f} acc: {:f}".format(
                    step + 1, Loss, acc))
Example #2
def get_dataset(dataset_name, seed=0, test=False):
    if dataset_name == "mnist":
        data_folder = './data/mnist'
        if not os.path.exists(data_folder):
            os.makedirs(data_folder)
        if not test:
            dataset = mnist.read_data_sets("./data/mnist", seed=seed).train
        else:
            dataset = mnist.read_data_sets("./data/mnist", seed=seed).test
    elif dataset_name == "cifar-10":
        if not test:
            dataset = cifar10.read_data_sets("./data/cifar-10",
                                             seed=seed).train
        else:
            dataset = cifar10.read_data_sets("./data/cifar-10", seed=seed).test
    else:
        raise Exception("Not implemented.")
    return dataset
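
A minimal usage sketch for get_dataset() above, assuming (as the training loop in Example #1 does) that the returned dataset object exposes a next_batch() method; the batch size of 96 mirrors that loop:

# Usage sketch (assumption: the dataset object provides next_batch()).
train_set = get_dataset("cifar-10", seed=0, test=False)
batch_x, batch_y = train_set.next_batch(96)
print(batch_x.shape, batch_y.shape)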
Example #3
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    pred = tf.add(tf.matmul(local4, weights), biases, name=scope.name)

# Define loss and optimizer
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()

# Launch the graph

cifar10 = read_data_sets("/tmp/data")
config = tf.ConfigProto(device_count={
    "CPU": 1,
    "GPU": 0
},
                        inter_op_parallelism_threads=1,
                        intra_op_parallelism_threads=1)
sess = tf.Session(config=config)
sess.run(init)
data_x, data_y = cifar10.train.images[0:30], cifar10.train.labels[0:30]
feed = {x: data_x, y: data_y}

mini = MCMC(accuracy, {
    x: cifar10.test.images,
    y: cifar10.test.labels
}, sess, 0, MPI.COMM_WORLD)
Example #4
def run_training():
    """Train BinaryConnect."""
    # Get the sets of images and labels for training, validation, and
    # test on CIFAR10.
    data_sets = cifar10.read_data_sets(dst_dir='./dataset',
                                       validation_size=5000)

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, train_placeholder = placeholder_inputs(
            FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = bc.inference_bin(images_placeholder, train_placeholder,
                                  stochastic=FLAGS.stochastic,
                                  use_bnorm=True) \
            if FLAGS.binary \
            else bc.inference_ref(images_placeholder, train_placeholder,
                                  use_bnorm=True)

        # Add to the Graph the Ops for loss calculation.
        loss = bc.loss(logits, labels_placeholder)

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = bc.training(loss, FLAGS.learning_rate)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_metric = bc.evaluation(logits, labels_placeholder)

        # Add a placeholder for logging execution time
        # frequency_placeholder = tf.placeholder(tf.float32, shape=())
        # tf.summary.scalar('Execution Time', frequency_placeholder)
        # TODO: support a separate summary for metadata (e.g. execution time)

        # Build the summary Tensor based on the TF collection of Summaries.
        summary = tf.summary.merge_all()

        # Add the variable initializer Op.
        ivars = tf.global_variables() + tf.local_variables()
        init = tf.variables_initializer(ivars)

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a logger to the validation accuracy
        val_acc_pl = tf.placeholder(tf.float32, shape=())
        summary_val_acc = tf.summary.scalar(name='validation_acc',
                                            tensor=val_acc_pl,
                                            collections=['validation'])
        summary_val = tf.summary.merge([summary_val_acc])

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer_train = tf.summary.FileWriter(
            os.path.join(FLAGS.log_dir, 'train'), sess.graph)
        summary_writer_val = tf.summary.FileWriter(
            os.path.join(FLAGS.log_dir, 'val'), sess.graph)

        # And then after everything is built:

        # Run the Op to initialize the variables.
        sess.run(init)

        # Start the training loop.
        duration = 0
        tp_value_total = 0
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()

            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                       labels_placeholder, train_placeholder,
                                       True)

            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value, acc_val = sess.run([train_op, loss, eval_metric],
                                              feed_dict=feed_dict)

            duration += time.time() - start_time
            tp_value_total += acc_val

            # Write the summaries and print an overview fairly often.
            if step % 100 == 0 and step > 0:
                # Print status to stdout.
                images_freq = 100 * FLAGS.batch_size / duration
                print(
                    'Step %d: loss = %.2f, correct = %.2f%% (%.3f images/sec)'
                    % (step, loss_value, tp_value_total / FLAGS.batch_size,
                       images_freq))
                tp_value_total = 0
                duration = 0
                # Update the events file.
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer_train.add_summary(summary_str, step)
                summary_writer_train.flush()

            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 500 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                # print('Training Data Eval:')
                # do_eval(sess,
                #         eval_metric,
                #         images_placeholder,
                #         labels_placeholder,
                #         train_placeholder,
                #         data_sets.train, summary)
                # Evaluate against the validation set.
                print('Validation Data Eval:')
                accuracy_val = do_eval(sess, eval_metric, images_placeholder,
                                       labels_placeholder, train_placeholder,
                                       data_sets.validation)
                # TODO: find a way to collect summaries for validation
                summary_str = sess.run(summary_val,
                                       feed_dict={val_acc_pl: accuracy_val})
                summary_writer_val.add_summary(summary_str, step)
                summary_writer_val.flush()

                # Evaluate against the test set.
                print('Test Data Eval:')
                do_eval(sess, eval_metric, images_placeholder,
                        labels_placeholder, train_placeholder, data_sets.test)
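
fill_feed_dict() is called throughout run_training() but is not shown in this example. The following is a hypothetical sketch of such a helper, inferred from its call sites; the argument names and the next_batch() call are assumptions, not the original implementation:

# Hypothetical helper (sketch only, inferred from the calls above).
def fill_feed_dict(data_set, images_pl, labels_pl, train_pl, is_training):
    # Draw one mini-batch and map it onto the three placeholders.
    images, labels = data_set.next_batch(FLAGS.batch_size)
    return {images_pl: images, labels_pl: labels, train_pl: is_training}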
Example #5
                                          stddev=1/192.0, wd=0.0)
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    pred = tf.add(tf.matmul(local4, weights), biases, name=scope.name)

# Define loss and optimizer
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()

# Launch the graph
cifar10 = read_data_sets("/tmp/data")
config = tf.ConfigProto(device_count={"CPU": 1, "GPU": 0},
                        inter_op_parallelism_threads=1,
                        intra_op_parallelism_threads=1)
sess = tf.Session(config=config)
sess.run(init)
data_x, data_y = cifar10.train.images[0:30], cifar10.train.labels[0:30]
training_size = len(data_x)
param = []
batch_size = training_size
for t in tf.trainable_variables():
    param.append(t.eval(session=sess))
if rank == 0:
    server = ParamServer(param, comm)
    while True:
        core, data = server.next_request([x for x in range(1, size)])
Example #6
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    pred = tf.add(tf.matmul(local4, weights), biases, name=scope.name)

# Define loss and optimizer
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()

# Launch the graph
if rank == 0:
    cifar10 = read_data_sets("")

config = tf.ConfigProto(device_count={
    "CPU": 1,
    "GPU": 0
},
                        inter_op_parallelism_threads=1,
                        intra_op_parallelism_threads=1)
sess = tf.Session(config=config)
sess.run(init)
if rank == 0:
    tx, ty = cifar10.train.images, cifar10.train.labels
    train_size = len(tx)
    tx, ty = comm.bcast((tx, ty), root=0)
else:
    tx, ty = comm.bcast([], root=0)
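
The rank checks above follow the usual mpi4py broadcast pattern: only rank 0 loads the data, and comm.bcast() returns the same object on every rank. A condensed, self-contained sketch of that pattern (the payload here is illustrative, not the CIFAR-10 arrays):

# Condensed mpi4py broadcast sketch; the payload is a placeholder.
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Only the root builds the payload; bcast returns it on every rank.
payload = ("images", "labels") if rank == 0 else None
tx, ty = comm.bcast(payload, root=0)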
Example #7
def run_testing():
    """Test BinaryConnect."""
    # Get the sets of images and labels for training, validation, and
    # test on CIFAR10.
    data_sets = cifar10.read_data_sets(dst_dir='./dataset',
                                       validation_size=5000)

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, train_placeholder = placeholder_inputs(
            FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = bc.inference_bin(images_placeholder, train_placeholder,
                                  stochastic=FLAGS.stochastic,
                                  use_bnorm=True) \
            if FLAGS.binary \
            else bc.inference_ref(images_placeholder, train_placeholder,
                                  use_bnorm=True)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_metric = bc.evaluation(logits, labels_placeholder)

        # Add the variable initializer Op.
        ivars = tf.global_variables() + tf.local_variables()
        init = tf.variables_initializer(ivars)

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # And then after everything is built:

        # Run the Op to initialize the variables, then restore the trained
        # model so that the initializer does not overwrite the restored weights.
        sess.run(init)

        # Load trained model.
        saver = tf.train.Saver()
        saver.restore(sess, FLAGS.model_path)
        print("Model loaded.")

        # Start the testing loop
        duration = 0
        tp_value_total = 0
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()

            # Fill a feed dictionary with the actual set of images and labels
            # for this particular testing step.
            feed_dict = fill_feed_dict(data_sets.test, images_placeholder,
                                       labels_placeholder, train_placeholder,
                                       False)

            # Run one step of the model.
            acc_val = sess.run(eval_metric, feed_dict=feed_dict)
            duration += time.time() - start_time
            tp_value_total += acc_val

            # Print an overview
            if step % 100 == 0 and step > 0:
                # Print status to stdout.
                images_freq = 100 * FLAGS.batch_size / duration
                print('Step %d: correct = %.2f%% (%.3f images/sec)' %
                      (step, tp_value_total / step, images_freq))
                duration = 0
Example #8
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    pred = tf.add(tf.matmul(local4, weights), biases, name=scope.name)

# Define loss and optimizer
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()

# Launch the graph
if rank == 0:
    cifar10 = read_data_sets("")

config = tf.ConfigProto(device_count={"CPU": 1, "GPU": 0},
                        inter_op_parallelism_threads=1,
                        intra_op_parallelism_threads=1)
sess = tf.Session(config=config)
sess.run(init)
if rank == 0:
    tx, ty = cifar10.train.images, cifar10.train.labels
    train_size = len(tx)
    tx, ty = comm.bcast((tx, ty), root=0)
else:
    tx, ty = comm.bcast([], root=0)
bsize = 6000
start = time.time()
totaltime = 0