Example #1
def inputs(train=True):
  """Construct input for evaluation using the Reader ops.
  Args:
    train: bool, indicating whether to use the training or evaluation data set.
  Raises:
    ValueError: If no data_dir is set.
  Returns:
    images: Images. 3D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  if train:
    filenames = [os.path.join(FLAGS.data_dir, 'train.csv') for _ in xrange(1000)]
  else:
    filenames = [os.path.join(FLAGS.data_dir, 'test1.csv'),
                 os.path.join(FLAGS.data_dir, 'test2.csv')]
  filename_queue = tf.train.string_input_producer(filenames)

  read_input = input.read(filename_queue)
  reshaped_image = tf.cast(read_input.image, tf.float32)
  height = IMAGE_SIZE
  width = IMAGE_SIZE
  # Image processing for evaluation.
  # Crop the central [height, width] of the image.
  resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
                                                         width, height)
  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_whitening(resized_image)
  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN if train else NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
  min_queue_examples = int(num_examples_per_epoch *
                           min_fraction_of_examples_in_queue)
  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples)
def recognizeFile(models, file, translate='', rotate='', scale=''):
    """!
    Match a single file and return the resulting scores as well as the
    normalization parameters.

    @param models list: The previously trained HMM models
    @param file String: The file containing the motion.
    @param translate String: The normalization type for correcting translation
    @param rotate String: The normalization type for correcting rotation
    @param scale String: The normalization type for correcting scaling
    @return An array of the model scores, translation, rotation, scaling parameters
    """
    #print(file)
    #read motion and normalize
    motion = input.read(file)
    motion,t,r,s = normalization.normalize(motion, translate, rotate, scale)

    plot.addPlot(motion[:,1:4], file)
    #writePointsToGrapherFile(motion, file)

    scores = []
    # check the motion's score (likelihood) under each trained model
    for model in models:
        scores.append(float(model.score(motion)))
    return numpy.array(scores), t, r, s
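A minimal usage sketch for recognizeFile: the returned scores are per-model likelihood scores, so the best-matching model is simply the argmax. The file path and the trained models list here are hypothetical placeholders:

scores, t, r, s = recognizeFile(models, '../recordings/some_motion.csv')
best = int(numpy.argmax(scores))
print('Best matching model:', best, 'with score', scores[best])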
Example #3
def distorted_inputs():
    """Construct distorted input for CIFAR training using the Reader ops.
  Raises:
    ValueError: If no data_dir is set.
  Returns:
    images: Images. 3D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """

    filenames = [
        os.path.join(FLAGS.data_dir, 'train.csv') for _ in xrange(10000)
    ]
    filename_queue = tf.train.string_input_producer(filenames)

    read_input = input.read(filename_queue)
    reshaped_image = tf.cast(read_input.image, tf.float32)
    height = IMAGE_SIZE
    width = IMAGE_SIZE
    # Image processing for training the network. Note the many random
    # distortions applied to the image.
    # Randomly crop a [height, width] section of the image.

    distorted_image = tf.image.random_crop(reshaped_image, [height, width])
    #distorted_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
    #                                                       width, height)

    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    # Because these operations are not commutative, consider randomizing
    # the order of their operation.
    #distorted_image = tf.image.random_brightness(distorted_image,
    #                                             max_delta=10)

    brightness_adj = random.randint(-20, 20)
    distorted_image = tf.image.adjust_brightness(distorted_image,
                                                 brightness_adj,
                                                 min_value=1,
                                                 max_value=255)

    contrast_adj = random.uniform(0.85, 1.15)
    distorted_image = tf.image.adjust_contrast(distorted_image,
                                               contrast_adj,
                                               min_value=1,
                                               max_value=255)
    #distorted_image = tf.image.random_contrast(distorted_image,
    #                                           lower=0.7, upper=1.3)
    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_whitening(distorted_image)
    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                             min_fraction_of_examples_in_queue)
    print('Filling queue with %d images before starting to train. '
          'This will take a few minutes.' % min_queue_examples)
    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(float_image, read_input.label,
                                           min_queue_examples)
Example #4
def distorted_inputs():
  """Construct distorted input for CIFAR training using the Reader ops.
  Raises:
    ValueError: If no data_dir is set.
  Returns:
    images: Images. 3D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """

  filenames = [os.path.join(FLAGS.data_dir, 'train.csv') for _ in xrange(10000)]
  filename_queue = tf.train.string_input_producer(filenames)

  read_input = input.read(filename_queue)
  reshaped_image = tf.cast(read_input.image, tf.float32)
  height = IMAGE_SIZE
  width = IMAGE_SIZE
  # Image processing for training the network. Note the many random
  # distortions applied to the image.
  # Randomly crop a [height, width] section of the image.

  distorted_image = tf.image.random_crop(reshaped_image, [height, width])
  #distorted_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
  #                                                       width, height)

  # Randomly flip the image horizontally.
  distorted_image = tf.image.random_flip_left_right(distorted_image)
  # Because these operations are not commutative, consider randomizing
  # the order of their operation.
  #distorted_image = tf.image.random_brightness(distorted_image,
  #                                             max_delta=10)

  brightness_adj = random.randint(-20, 20)
  distorted_image = tf.image.adjust_brightness(distorted_image,
                                               brightness_adj,
                                               min_value=1,
                                               max_value=255)

  contrast_adj = random.uniform(0.85, 1.15)
  distorted_image = tf.image.adjust_contrast(distorted_image,
                                             contrast_adj,
                                             min_value=1,
                                             max_value=255)
  #distorted_image = tf.image.random_contrast(distorted_image,
  #                                           lower=0.7, upper=1.3)
  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_whitening(distorted_image)
  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                           min_fraction_of_examples_in_queue)
  print('Filling queue with %d images before starting to train. '
        'This will take a few minutes.' % min_queue_examples)
  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples)
def plotFile(file, translate='', rotate='', scale=''):
    """!
    Read a single motion from a file and add it to the current plot list.

    @param file String: The file containing the motion.
    @param translate String: The normalization type for correcting translation
    @param rotate String: The normalization type for correcting rotation
    @param scale String: The normalization type for correcting scaling
    """
    #read motion and normalize
    motion = input.read(file)
    motion,t,r,s = normalization.normalize(motion, translate, rotate, scale)

    plot.addPlot(motion[:,1:4], file)
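plotFile only adds a motion to the current plot list; nothing is drawn until plot.plot() is called. A minimal usage sketch with hypothetical file paths:

plotFile('../recordings/writing/training/example1.csv')
plotFile('../recordings/writing/training/example2.csv')
plot.plot('../plots/writing comparison')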
Example #6
def readNormalized(file, translate='', rotate='', scale='', clean=True):
    """!
    Read a motion from a file path and apply normalization to it.

    @param file String: The file path of the motion
    @param translate: The normalization for translating the motions
    @param rotate: The normalization for rotating the motions
    @param scale: The normalization for scaling the motions
    @param clean: Remove duplicate points and large jumps, default True
    @return: The normalized motion and the normalization parameters
    """
    motion = read(file)
    motion, t, r, s = normalize(motion, translate, rotate, scale, clean)
    return motion, t, r, s
def createMotionsAndLengths(path, translate='', rotate='', scale=''):
    """!
    Read the motions from a folder and create a concatenated array with the lengths.

    The given directory 'path' must contain a subdirectory 'training' containing the motions
    as individual csv files.

    @param path String: The path to the motion data.
    @param translate: The normalization for translating the motions
    @param rotate: The normalization for rotating the motions
    @param scale: The normalization for scaling the motions
    @return: The concatenated motions and a list of the motion lengths
    """
    # list of motions read from the file
    motions = []
    # length of motions read from the file
    lengths = []
    count = 0
    plot.clearPlot()
    input.logLn('\n- ' + '{:<10}'.format(path + ':'))
    # read all files from directory associated with one motion
    for file in sorted(glob.glob(path + '/training/*.csv')):
        print(file)
        input.logLn(string.basename(string.splitext(file)[0]))
        count += 1
        #read motion and normalize
        motion = input.read(file)
        motion,t,r,s = normalization.normalize(motion, translate, rotate, scale)

        # Add to plot all training plots
        plot.addPlot(motion[:,1:4], file)

        # Add motion to the list of motions
        motions.append(motion)
        # Add length (number of poses in motion) to the list of lengths
        lengths.append(len(motion))
    _, folderName = string.split(string.dirname(path))
    # Plot all training motions
    plot.plot('../plots/' + folderName + ' training')
    # The observations are a list of poses
    X = numpy.concatenate(motions)
    return X, lengths
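The (X, lengths) pair returned by createMotionsAndLengths matches the fit interface of hmmlearn, and recognizeFile scores motions with model.score, so a plausible training step is sketched below. Using hmmlearn, as well as the path and the hyperparameters, is an assumption, not something these examples confirm:

from hmmlearn import hmm

X, lengths = createMotionsAndLengths('../recordings/writing')
# One Gaussian HMM per gesture class, trained on all of that class's motions.
model = hmm.GaussianHMM(n_components=8, covariance_type='diag', n_iter=20)
model.fit(X, lengths)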
Example #8
def inputs(train=True):
    """Construct input for evaluation using the Reader ops.
  Args:
    train: bool, indicating whether to use the training or evaluation data set.
  Raises:
    ValueError: If no data_dir is set.
  Returns:
    images: Images. 3D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
    if train:
        filenames = [
            os.path.join(FLAGS.data_dir, 'train.csv') for _ in xrange(1000)
        ]
    else:
        filenames = [
            os.path.join(FLAGS.data_dir, 'test1.csv'),
            os.path.join(FLAGS.data_dir, 'test2.csv')
        ]
    filename_queue = tf.train.string_input_producer(filenames)

    read_input = input.read(filename_queue)
    reshaped_image = tf.cast(read_input.image, tf.float32)
    height = IMAGE_SIZE
    width = IMAGE_SIZE
    # Image processing for evaluation.
    # Crop the central [height, width] of the image.
    resized_image = tf.image.resize_image_with_crop_or_pad(
        reshaped_image, width, height)
    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_whitening(resized_image)
    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN if train else NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
    min_queue_examples = int(num_examples_per_epoch *
                             min_fraction_of_examples_in_queue)
    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(float_image, read_input.label,
                                           min_queue_examples)
Example #9
        error = sum_error(sigma)

        mses.append(error)
        if mses[-1] < 10:
            return mses, wo, wh

        sigma_o = np.dot(sigma.T, net_h)  # ==> 1 * 10

        sigma_h = np.dot((np.dot(sigma, wo) * sigmoid_dash(net_h_active)).T, x)  # ==> 10 * 8

        wo = wo + 0.0001 * sigma_o  # ==> 1 * 10
        wh = wh + 0.0001 * sigma_h  # ==> 10 * 8


if __name__ == '__main__':
    np.random.seed(1)
    m, l, n, x, y = read()
    # m ==> 8
    # l ==> 10
    # n ==> 1
    wh = np.random.uniform(low=-5, high=5, size=(l, m))  # 10 * 8
    wo = np.random.uniform(low=-5, high=5, size=(n, l))  # 1 * 10
    x = normalize_all(x)
    y = normalize(y)
    mses, wo, wh = backpropagation(x, y, wo, wh)
    save(wo, wh)
    plt.plot(mses)
    print(mses[-1])
    plt.show()
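The backpropagation snippet above relies on helpers that are not shown (read, save, normalize, sum_error, sigmoid_dash). A minimal sketch of the mathematical ones, assuming a standard logistic activation and a squared-error cost; whether sigmoid_dash receives the pre-activation or the already-activated hidden values cannot be told from the snippet, so the pre-activation form is assumed here:

import numpy as np

def sigmoid(x):
    # Element-wise logistic activation.
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_dash(x):
    # Derivative of the logistic function at the pre-activation x (assumption).
    s = sigmoid(x)
    return s * (1.0 - s)

def sum_error(sigma):
    # Total squared error of the output layer for this epoch.
    return float(np.sum(sigma ** 2))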
Example #10
                   flags=['multi_index'],
                   op_flags=['readwrite']) as it:
        for cell in it:
            x = it.multi_index[0]
            y = it.multi_index[1]
            values[x, y] = space[sorted(list(range(y, y + size)) * size),
                                 list(range(x, x + size)) * size].sum()
    return values


def part2(input):
    serial_num = int(input)
    space = np.empty((300, 300), dtype=np.int8)
    assign_power(space, serial_num)

    max_powers = np.arange(1, 300)
    with np.nditer(max_powers, flags=['f_index'],
                   op_flags=['readwrite']) as it:
        for p in it:
            max_powers[it.index] = max_square(space,
                                              max_powers[it.index]).max()
    return


if __name__ == '__main__':
    import input as inp
    DAY = 11
    input = inp.read(DAY)
    print(part1(input))
    # print(part2(input))
    # print(part1('42'))
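The grid snippet calls assign_power(space, serial_num), which is not shown. DAY = 11 and the 300x300 grid suggest Advent of Code 2018 day 11, so a sketch of that helper using the puzzle's power-level formula follows; the mapping from array indices to the puzzle's 1-based coordinates is an assumption:

import numpy as np

def assign_power(space, serial_num):
    # Fill each cell with its fuel power level: take the hundreds digit of
    # ((x + 10) * y + serial) * (x + 10) and subtract 5.
    for i in range(space.shape[0]):
        for j in range(space.shape[1]):
            x, y = j + 1, i + 1          # assumed 1-based puzzle coordinates
            rack_id = x + 10
            power = (rack_id * y + serial_num) * rack_id
            space[i, j] = (power // 100) % 10 - 5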
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
#
import numpy

import input
import plot
import glob

import clean

#for file in sorted(glob.glob('../recordings/writing/training/*.csv')):
motion = input.read('../recordings/wiping/translated/17-09-01 16-47-01.csv')
motion, _ = clean.removeDoublePoints(motion)
newMotion, count = clean.removeInvalidParts(motion)
print('Removed ' + str(count) + ' points')
print('New length is ', len(newMotion[:, 0]))
plot.addPlot(motion[:, 1:4], "Original")
plot.addPlot(newMotion[:, 1:4], "Cleaned")
plot.plot()

#
# motion = input.read('all/17-09-01 16-44-48.csv')
# motion2 = input.read('all/17-09-01 16-44-51.csv')
#
#
# plot.addPlot(motion[:,1:4], 'Something')
# plot.addPlot(motion2[:,1:4], 'Something else')
# plot.plot()

# from os.path import basename