Example #1
def train_generator():
    n_iters = 100000
    print_every = 500
    plot_every = 500

    all_losses = []
    total_loss = 0

    categories, examples = data.inputs()
    n_categories = len(categories)
    rnn = GeneratorNN(n_categories, data.N_CHARS, 128, data.N_CHARS)

    start = time.time()

    for i in range(1, n_iters + 1):
        category, sample, category_tensor, sample_tensor = data.random_sample(
            categories, examples, one_hot_categories=True)
        target_tensor = autograd.Variable(data.target_encode(sample))
        output, loss = rnn.train(category_tensor, sample_tensor, target_tensor)
        total_loss += loss

        # Print iteration number, progress and current loss
        if i % print_every == 0:
            print('%s (%d %d%%) %.4f' %
                  (time_since(start), i, i / n_iters * 100, loss))

        # Add the average loss over the last plot_every iterations to the list
        if i % plot_every == 0:
            all_losses.append(total_loss / plot_every)
            total_loss = 0
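The loop above relies on a time_since helper that the snippet does not define. A minimal sketch of such a helper, assuming it only formats the elapsed wall-clock time as minutes and seconds:

import math
import time

def time_since(since):
    # Elapsed wall-clock time since `since`, formatted as "Xm Ys".
    s = time.time() - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)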
Example #2
def evaluate(run_dir):
    with tf.Graph().as_default() as g:
        input_file = os.path.join(FLAGS.train_dir, 'md.json')
        print(input_file)
        with open(input_file, 'r') as f:
            md = json.load(f)

        eval_data = FLAGS.eval_data == 'valid'
        num_eval = md['%s_counts' % FLAGS.eval_data]

        model_fn = select_model(FLAGS.model_type)


        with tf.device(FLAGS.device_id):
            print('Executing on %s' % FLAGS.device_id)
            images, labels, _ = inputs(FLAGS.train_dir, FLAGS.batch_size, FLAGS.image_size, train=not eval_data, num_preprocess_threads=FLAGS.num_preprocess_threads)
            logits = model_fn(md['nlabels'], images, 1, False)
            summary_op = tf.summary.merge_all()
            
            summary_writer = tf.summary.FileWriter(run_dir, g)
            saver = tf.train.Saver()
            
            if FLAGS.requested_step_seq:
                sequence = FLAGS.requested_step_seq.split(',')
                for requested_step in sequence:
                    print('Running %s' % requested_step)
                    eval_once(saver, summary_writer, summary_op, logits, labels, num_eval, requested_step)
            else:
                while True:
                    print('Running loop')
                    eval_once(saver, summary_writer, summary_op, logits, labels, num_eval)
                    if FLAGS.run_once:
                        break
                    time.sleep(FLAGS.eval_interval_secs)
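For reference, requested_step_seq is treated above as a comma-separated list of checkpoint steps. A tiny sketch of the parsing that the branch assumes (the flag value here is hypothetical):

requested_step_seq = '5000,10000,15000'   # hypothetical flag value
for requested_step in requested_step_seq.split(','):
    print('Evaluating checkpoint at step %s' % requested_step)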
Example #3
def evaluate(run_dir):
    with tf.Graph().as_default() as g:
        input_file = os.path.join(FLAGS.train_dir, 'md.json')
        #print(input_file)
        with open(input_file, 'r') as f:
            md = json.load(f)

        eval_data = FLAGS.eval_data == 'valid'
        num_eval = md['%s_counts' % FLAGS.eval_data]

        model_fn = select_model(FLAGS.model_type)


        with tf.device(FLAGS.device_id):
            #print('Executing on %s' % FLAGS.device_id)
            images, labels, _ = inputs(FLAGS.train_dir, FLAGS.batch_size, FLAGS.image_size, train=not eval_data, num_preprocess_threads=FLAGS.num_preprocess_threads)
            logits = model_fn(md['nlabels'], images, 1, False)
            summary_op = tf.summary.merge_all()
            
            summary_writer = tf.summary.FileWriter(run_dir, g)
            saver = tf.train.Saver()
            
            if FLAGS.requested_step_seq:
                sequence = FLAGS.requested_step_seq.split(',')
                for requested_step in sequence:
                    #print('Running %s' % sequence)
                    eval_once(saver, summary_writer, summary_op, logits, labels, num_eval, requested_step)
            else:
                while True:
                    #print('Running loop')
                    eval_once(saver, summary_writer, summary_op, logits, labels, num_eval)
                    if FLAGS.run_once:
                        break
                    time.sleep(FLAGS.eval_interval_secs)
Example #4
def setup_model(args):
    """Sets up the model.

    Args:
        args: The program args.

    Returns:
        The model train operation.
    """
    model = getattr(models, args.model)
    inputs, targets = data.inputs(args.datadir, args.dataset, args.batchsize)
    return model(inputs, targets)
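A hedged usage sketch for setup_model: the flag names below are illustrative assumptions, not taken from the original project, and `models` is whatever module holds the model constructors.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model', default='mlp')             # looked up as an attribute of `models`
parser.add_argument('--datadir', default='./data')
parser.add_argument('--dataset', default='mnist')
parser.add_argument('--batchsize', type=int, default=32)
args = parser.parse_args()

train_op = setup_model(args)  # the returned train operation is then run in a session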
Example #5
def train(dataset, network, checkpoint_dir, batch_size=BATCH_SIZE,
          last_step=LAST_STEP, learning_rate=LEARNING_RATE, epsilon=EPSILON,
          beta1=BETA_1, beta2=BETA_2, dropout=DROPOUT,
          scale_inputs=SCALE_INPUTS, distort_inputs=DISTORT_INPUTS,
          zero_mean_inputs=ZERO_MEAN_INPUTS, display_step=DISPLAY_STEP,
          save_checkpoint_secs=SAVE_CHECKPOINT_SECS,
          save_summaries_steps=SAVE_SUMMARIES_STEPS):

    if not tf.gfile.Exists(checkpoint_dir):
        tf.gfile.MakeDirs(checkpoint_dir)

    with tf.Graph().as_default():

        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        global_step_init = -1
        if ckpt and ckpt.model_checkpoint_path:
            global_step_init = int(
                ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            global_step = tf.Variable(global_step_init, name='global_step',
                                      dtype=tf.int64, trainable=False)
        else:
            global_step = tf.contrib.framework.get_or_create_global_step()

        data, labels = inputs(dataset, False, batch_size, scale_inputs,
                              distort_inputs, zero_mean_inputs, shuffle=True)

        keep_prob = tf.placeholder(tf.float32)
        logits = inference(data, network, keep_prob)
        loss = cal_loss(logits, labels)
        acc = cal_accuracy(logits, labels)

        train_op = train_step(
            loss, global_step, learning_rate, beta1, beta2, epsilon)

        try:
            with tf.train.MonitoredTrainingSession(
                    checkpoint_dir=checkpoint_dir,
                    save_checkpoint_secs=save_checkpoint_secs,
                    save_summaries_steps=save_summaries_steps,
                    hooks=hooks(display_step, last_step, batch_size, loss, acc)
                    ) as monitored_session:
                while not monitored_session.should_stop():
                    monitored_session.run(train_op,
                                          feed_dict={keep_prob: dropout})

        except KeyboardInterrupt:
            pass
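A hedged usage sketch for the training routine above; load_dataset and build_network are hypothetical placeholders for however the project constructs its dataset and network objects.

dataset = load_dataset()     # hypothetical helper returning the dataset object
network = build_network()    # hypothetical helper returning the network definition
train(dataset, network,
      checkpoint_dir='/tmp/checkpoints',
      batch_size=64,
      display_step=100)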
Example #6
def train_lstm():
    n_hidden = 128
    n_iters = 100000
    print_every = 500
    plot_every = 100

    current_loss = 0
    all_losses = []

    categories, examples = data.inputs()
    n_categories = len(categories)
    lstm = LabellerLSTM(data.N_CHARS, 64, 64, n_categories, lr=0.1)

    start = time.time()

    for i in range(1, n_iters + 1):
        category, sample, category_tensor, sample_tensor = data.random_sample(
            categories, examples)
        pred, loss = lstm.train(category_tensor, sample_tensor)
        current_loss += loss

        # Print iter number, loss, name and guess
        if i % print_every == 0:
            guess, guess_i = data.decode_prediction(categories, pred)
            correct = '✓' if guess == category else '✗ (%s)' % category
            print('%d %d%% (%s) %.4f %s / %s %s' %
                  (i, i / n_iters * 100, time_since(start), loss, sample,
                   guess, correct))

        # Add current loss avg to list of losses
        if i % plot_every == 0:
            all_losses.append(current_loss / plot_every)
            current_loss = 0

    plot_losses(all_losses, save=True, fname='lstm_losses.png')
    plot_confusion(lstm,
                   categories,
                   examples,
                   fname='lstm_confusion.png',
                   reshape_for_minibatch=True)
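plot_losses is not shown in the snippet. A minimal sketch, assuming it simply saves a matplotlib line plot of the averaged losses:

import matplotlib.pyplot as plt

def plot_losses(losses, save=False, fname='losses.png'):
    # One point per plot_every iterations: the running-average loss.
    plt.figure()
    plt.plot(losses)
    plt.xlabel('plot interval')
    plt.ylabel('average loss')
    if save:
        plt.savefig(fname)
    else:
        plt.show()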
Example #7
File: main.py Project: Cgboal/Stuff
def inputs():
	cmd = ""
	while (cmd.lower() != "quit"):
		
		cmd = raw_input (">>")
		
		if cmd.lower() == "help":
			print ("Available operators are: +, -, *, /, **, root, bin, mean, mode, median, factorial, quit")
		
		elif cmd.lower() == '+':
			add1 = raw_input("Enter first number ")
			add2 = raw_input("Enter second number ")
			print ("The sum of these two numbers is " + str(addition(add1, add2)))
		
		elif cmd.lower() == "-":
			sub1 = raw_input("Enter first number ")
			sub2 = raw_input("Enter second number ")
			print ("The difference of these two numbers is " + str(subtraction(sub1, sub2)))
		elif cmd.lower() == "*":
			multiply1 = raw_input("Enter first number ")
			multiply2 = raw_input("Enter second number ")
			print ("The product of these two numbers is " + str(multiplication(multiply1, multiply2)))
		
		elif cmd.lower() == "/":
			div1 = raw_input("Enter first number ")
			div2 = raw_input("Enter second number ")
			print ("The quotient of these two numbers is " + str(division(div1, div2)))
		
		elif cmd.lower() == "**":
			base = raw_input("Enter the base ")
			exponent = raw_input("Please enter the exponent ")
			print (str(power(base, exponent)))
		
		elif cmd.lower() == "root":
			root1 = raw_input("Enter a number ")
			print ("The square root of that number is " + str(root(root1)))
		
		elif cmd.lower() == "mean":
			print data.mean(data.inputs())
		
		elif cmd.lower() == "median":
			print data.median(data.inputs())
		
		elif cmd.lower() == "mode":
			print data.mode(data.inputs())
		
		elif cmd.lower() == "deviation":
			print data.deviation(data.inputs())
		
		elif cmd.lower() == "encrypt":
			encrypt.main(cmd)
		
		elif cmd.lower() == "decrypt":
			encrypt.main(cmd)

		elif cmd.lower() == "bin":
			num1 = raw_input("Enter a number to convert to binary ")
			print bin_convert(num1)
		elif cmd.lower() == "factorial":
			print factorial.factorial(factorial.inputs())

		elif cmd.lower() == "quit":
			pass                      
		else:
			print "input not valid, try again"
	
	sys.exit()
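The arithmetic helpers (addition, subtraction, and so on) live elsewhere in the project. A minimal sketch of the kind of helper the menu assumes, including the string-to-float conversion that the raw_input values need:

def addition(a, b):
    # Values arrive as strings from raw_input, so convert before adding.
    return float(a) + float(b)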
Example #8
def evaluate(dataset, network, checkpoint_dir, eval_dir, batch_size=BATCH_SIZE,
             scale_inputs=SCALE_INPUTS, distort_inputs=DISTORT_INPUTS,
             zero_mean_inputs=ZERO_MEAN_INPUTS, eval_data=EVAL_DATA):

    if not tf.gfile.Exists(checkpoint_dir):
        raise ValueError('Checkpoint directory {} doesn\'t exist.'
                         .format(checkpoint_dir))

    if tf.gfile.Exists(eval_dir):
        tf.gfile.DeleteRecursively(eval_dir)
    tf.gfile.MakeDirs(eval_dir)

    with tf.Graph().as_default() as g:
        data, labels = inputs(dataset, eval_data, batch_size, scale_inputs,
                              distort_inputs, zero_mean_inputs, num_epochs=1,
                              shuffle=False)

        keep_prob = tf.placeholder(tf.float32)
        logits = inference(data, network, keep_prob)
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        init_op = [tf.global_variables_initializer(),
                   tf.local_variables_initializer()]

        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(eval_dir, g)

        with tf.Session() as sess:
            sess.run(init_op)

            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                path = ckpt.model_checkpoint_path

                # Restore from checkpoint.
                saver.restore(sess, path)

                # Assuming model_checkpoint_path looks something like
                # /my-favorite-path/model.ckpt-0, extract global step from it.
                global_step = path.split('/')[-1].split('-')[-1]

            else:
                print('No checkpoint file found.')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            true_count = 0

            if eval_data:
                total_count = dataset.num_examples_per_epoch_for_eval
            else:
                total_count = dataset.num_examples_per_epoch_for_train_eval

            num_examples = 0

            try:
                while True:
                    predictions = sess.run([top_k_op],
                                           feed_dict={keep_prob: 1.0})
                    true_count += np.sum(predictions)
                    num_examples += batch_size
                    num_examples = min(num_examples, total_count)

                    percentage = 100.0 * num_examples / total_count
                    sys.stdout.write('\r>> Calculating accuracy {:.1f}%'
                                     .format(percentage))
                    sys.stdout.flush()

            except (KeyboardInterrupt, tf.errors.OutOfRangeError):
                pass

            finally:
                precision = 100.0 * true_count / total_count

                print('')
                print('Accuracy: {:.2f}%'.format(precision))

                summary = tf.Summary()
                summary.ParseFromString(sess.run(summary_op))
                summary.value.add(tag='Precision', simple_value=precision)
                summary_writer.add_summary(summary, global_step)

                coord.request_stop()
                coord.join(threads)
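The true_count accumulation above works because tf.nn.in_top_k returns one boolean per example in the batch. A small standalone sketch of that behaviour, in the same TF 1.x style as the examples:

import numpy as np
import tensorflow as tf

logits = tf.constant([[0.1, 0.9], [0.8, 0.2]])   # two examples, two classes
labels = tf.constant([1, 1])
top_k_op = tf.nn.in_top_k(logits, labels, 1)     # -> [True, False]

with tf.Session() as sess:
    predictions = sess.run([top_k_op])
    print(np.sum(predictions))                   # 1 correct prediction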