Example #1
    def display_grabvars(self, grabbed_vals, grabbed_vars, step=1):
        if self.dendrogramLayers:
            grabbed_vars = grabbed_vars[:len(self.wantedMapGrabvars)]
            grabbed_vals = grabbed_vals[:len(self.wantedMapGrabvars)]

        names = [x.name for x in grabbed_vars]
        msg = "Grabbed Variables at Step " + str(step)
        print("\n" + msg, end="\n")
        fig_index = 0
        for i, v in enumerate(grabbed_vals):
            if names:
                print("   " + names[i] + " = ")
            if isinstance(v, np.ndarray) and v.ndim > 1:  # v is a matrix, so plot it
                if 'wgt' in names[i] and self.wgtMatrix:
                    TFT.display_matrix(v,
                                       fig=self.grabvar_figures[fig_index],
                                       title=names[i] + ' at step ' + str(step))
                else:
                    TFT.hinton_plot(v,
                                    fig=self.grabvar_figures[fig_index],
                                    title=names[i] + ' at step ' + str(step))
                fig_index += 1
            else:  # scalars and vectors are simply printed
                print(v, end="\n\n")
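
For context, the grabbed values and grabbed variables stay index-aligned because the values come from running the variables in a session. A minimal sketch of the calling side (the `sess`, `feeder`, and `step` names are assumptions, not part of the example above):

    # Hypothetical driver code: evaluate the monitored tensors, then hand the
    # resulting values, still index-aligned, to display_grabvars.
    grabbed_vals = sess.run(self.grabvars, feed_dict=feeder)
    self.display_grabvars(grabbed_vals, self.grabvars, step=step)
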
Example #2
    def display_matrix(self, numCases):
        names = [x.name for x in self.grabvars]
        self.reopen_current_session()
        tCases = self.case_manager.get_training_cases()
        cases = tCases[:numCases]  # use the first numCases training cases
        inputs = [c[0] for c in cases]
        targets = [c[1] for c in cases]
        feeder = {self.input: inputs, self.target: targets}

        result = self.current_session.run([self.output, self.grabvars],
                                          feed_dict=feeder)

        for i, v in enumerate(result[1]):
            if isinstance(v, np.ndarray) and v.ndim > 1:  # v is a matrix, so plot it
                TFT.display_matrix(v,
                                   fig=self.grabvar_figures[i],
                                   title=names[i])
            else:
                print("\n\n")
                print(names[i])
                print(v, end="\n\n")

        self.close_current_session(view=False)
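
Passing a list of tensors as a single fetch makes `session.run` return the corresponding values as a nested list, which is why the grabbed values are read from `result[1]`. Equivalently, the two fetches can be unpacked directly (a sketch using the same attributes as above):

    output_vals, grab_vals = self.current_session.run(
        [self.output, self.grabvars], feed_dict=feeder)
    # grab_vals[i] is the value computed for self.grabvars[i]
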
Example #3
    def display_grabvars(self, grabbed_vals, grabbed_vars, step=1):
        names = [x.name for x in grabbed_vars]
        for i, v in enumerate(grabbed_vals):
            if isinstance(v, np.ndarray) and v.ndim == 1:  # v is a vector (e.g. a bias vector)
                v = np.array([v])  # promote to a 1-row matrix so it can be plotted
            TFT.display_matrix(v, fig=None, title='Matrix plot of ' + names[i] + ' at step ' + str(step))
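
The `np.array([v])` wrapper turns a shape-`(n,)` bias vector into a 1 x n matrix so `display_matrix` has two dimensions to draw; `np.atleast_2d` is an equivalent, more idiomatic spelling. A minimal sketch:

    import numpy as np

    bias = np.array([0.1, -0.3, 0.7])  # shape (3,)
    np.array([bias]).shape             # (1, 3)
    np.atleast_2d(bias).shape          # (1, 3), same result
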
Example #4
    def do_mapping(self,
                   session=None,
                   scatter=True,
                   mbs=100,
                   testset="mapping",
                   mapbs=10,
                   numeric=False):

        sess = session if session else self.current_session
        grabvars = [self.input, self.predictor]

        grabvars += self.mapvars

        # pick a random contiguous batch of mapbs training cases
        randNum = random.randint(0, len(self.training_cases) - mapbs)
        cases = self.training_cases[randNum:randNum + mapbs]

        inputs = [c[0] for c in cases]
        targets = [c[1] for c in cases]

        feeder = {self.input: inputs, self.target: targets}
        _, grabvals, _ = self.run_one_step([self.predictor],
                                           grabvars,
                                           session=sess,
                                           feed_dict=feeder,
                                           show_interval=None)
        predictions = list(grabvals[1])  # network outputs for the map batch

        fig_index = 0
        names = [x.name for x in grabvars[2:-len(self.dendrogram_layers)]]
        for grabval in grabvals[2:-len(self.dendrogram_layers)]:

            if not isinstance(grabval[0], np.ndarray):  # flat vector: promote to a column matrix
                grabval = np.array([[c] for c in grabval])

            if numeric:
                TFT.display_matrix(grabval,
                                   fig=self.mapvar_figures[fig_index],
                                   title=names[fig_index])
            else:
                TFT.hinton_plot(grabval,
                                fig=self.mapvar_figures[fig_index],
                                title=names[fig_index])
            fig_index += 1

        input_strings = [TFT.bits_to_str(i) for i in inputs]
        target_strings = [TFT.bits_to_str(i) for i in targets]
        for dendro_vals in grabvals[-len(self.dendrogram_layers):]:

            TFT.dendrogram(dendro_vals, target_strings)

        input()  # block until the user presses Enter so the plots stay on screen
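
One caveat: the slice `grabvals[2:-len(self.dendrogram_layers)]` silently returns an empty list when `dendrogram_layers` is empty, because `-0` is `0`. A guard along these lines (a sketch, not part of the original) avoids that edge case:

    n_dendro = len(self.dendrogram_layers)
    map_vals = grabvals[2:-n_dendro] if n_dendro else grabvals[2:]
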
Example #5
    def display_grabvars(self, grabbed_vals, grabbed_vars, step=1):
        names = [x.name for x in grabbed_vars]
        msg = "Grabbed Variables at Step " + str(step)
        print("\n" + msg)
        for i, v in enumerate(grabbed_vals):
            if names:
                print("   " + names[i] + " = ")
            if isinstance(v, np.ndarray) and v.ndim > 1:  # v is a matrix: show it both ways
                TFT.hinton_plot(v, title=names[i] + ' at step ' + str(step))
                TFT.display_matrix(v, title=names[i] + ' at step ' + str(step))
            else:
                print(v, end="\n\n")
Example #6
    def do_mapping(self, sess, cases):
        # Separate the cases into inputs and targets
        inputs = [c[0] for c in cases]
        targets = [c[1] for c in cases]

        # Recover the integer class label from each one-hot target vector
        tar = [list(t).index(1) for t in targets]

        # Run the network on the map batch without learning
        feeder = {self.input: inputs, self.target: targets}
        testres, grabvals, _ = self.run_one_step(self.predictor,
                                                 self.grabvars,
                                                 self.probes,
                                                 session=sess,
                                                 feed_dict=feeder,
                                                 show_interval=None)

        # Pair each grabbed variable's name with its value for easier lookup
        names = [x.name for x in self.grabvars]
        zips = zip(names, grabvals)
        # Counters for the weight and bias figures
        num = 0
        num2 = 0
        # Route each grabbed value to its plot type by name
        for name, val in zips:
            if name in self.Hnames:
                TFT.hinton_plot(val,
                                fig=PLT.figure(),
                                title=name + ' at step Test')
            if name in self.Dnames:
                TFT.dendrogram(val, tar, title="dendrogram" + name)
            if name in self.wnames:
                fig_wgt = PLT.figure()
                TFT.display_matrix(val, fig=fig_wgt, title=name)
                num += 1
            if name in self.bnames:
                fig_bias = PLT.figure()
                TFT.display_matrix(np.array([val]),  # promote the bias vector to a 1-row matrix
                                   fig=fig_bias,
                                   title=name)
                num2 += 1
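
The four name lists route each grabbed value to a plot type. How they are filled is not shown here, but a plausible setup (purely an assumption about the surrounding class; `hinton_vars` and friends are hypothetical) would collect the names of the tensors chosen for each visualization:

    # Hypothetical initialization elsewhere in the class:
    self.Hnames = [v.name for v in hinton_vars]   # values to hinton-plot
    self.Dnames = [v.name for v in dendro_vars]   # values to draw dendrograms of
    self.wnames = [w.name for w in weight_vars]   # weight matrices
    self.bnames = [b.name for b in bias_vars]     # bias vectors
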
Example #7
def train(dims=[11, 40, 20, 6],
          activation_func='tanh',
          softmax=True,
          cost_func=CE,
          lr=0.5,
          vint=10,
          bint=10,
          acc_lim=0.95,
          initial_weight_range=[-0.1, 0.1],
          data_source='gen_wine_cases',
          case_count=1,
          vfrac=0.1,
          tfrac=0.1,
          mbs=1277,
          map_bs=20,
          epochs=10000,
          show_layers=None,
          dendrogram_layers=None,
          show=True,
          map_layers=[1, 2]):


  # Training and validation accuracies
  train_acc = []
  val_acc = []

  # Import data
  dataset = getattr(TFT, data_source)(case_count=case_count)
  mnist = case_holder(dataset, tfrac=tfrac, vfrac=vfrac)

  sess = tf.InteractiveSession()
  # Create a multilayer model.

  # Input placeholders
  with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, dims[0]], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, dims[-1]], name='y-input')


  # We can't initialize these variables to 0 - the network will get stuck.
  def weight_variable(shape):
    """Create a weight variable with appropriate initialization."""
    if initial_weight_range == "scaled":
        initial = tf.truncated_normal(shape, stddev=0.1)
    else:
        initial = tf.random_uniform(shape=shape,
                                    minval=initial_weight_range[0],
                                    maxval=initial_weight_range[1])
    return tf.Variable(initial)

  def bias_variable(shape):
    """Create a bias variable with appropriate initialization."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

  def variable_summaries(var):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
    with tf.name_scope('summaries'):
      mean = tf.reduce_mean(var)
      tf.summary.scalar('mean', mean)
      with tf.name_scope('stddev'):
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
      tf.summary.scalar('stddev', stddev)
      tf.summary.scalar('max', tf.reduce_max(var))
      tf.summary.scalar('min', tf.reduce_min(var))
      tf.summary.histogram('histogram', var)

  def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=getattr(tf.nn, activation_func)):
    """Reusable code for making a simple neural net layer.
    It does a matrix multiply, a bias add, and then applies the configured
    activation function to nonlinearize. It also sets up name scoping so that
    the resultant graph is easy to read, and adds a number of summary ops.
    """
    # Adding a name scope ensures logical grouping of the layers in the graph.
    with tf.name_scope(layer_name):
      # This Variable will hold the state of the weights for the layer
      with tf.name_scope('weights'):
        weights = weight_variable([input_dim, output_dim])
        variable_summaries(weights)
      with tf.name_scope('biases'):
        biases = bias_variable([output_dim])
        variable_summaries(biases)
      with tf.name_scope('Wx_plus_b'):
        preactivate = tf.matmul(input_tensor, weights) + biases
        tf.summary.histogram('pre_activations', preactivate)
      activations = act(preactivate, name='activation')
      tf.summary.histogram('activations', activations)
      return activations



  previous_layer = x
  layers = []
  for i in range(1, len(dims)):
      # build each layer with the configured activation_func (the nn_layer default)
      layers.append(nn_layer(previous_layer, dims[i-1], dims[i], 'layer' + str(i)))
      previous_layer = layers[-1]
  y = layers[-1]



  with tf.name_scope('error_func'):
    # The raw formulation of cross-entropy,
    #
    # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.softmax(y)),
    #                               reduction_indices=[1]))
    #
    # can be numerically unstable.
    #
    # So here we use tf.nn.softmax_cross_entropy_with_logits on the
    # raw outputs of the nn_layer above, and then average across
    # the batch.
    diff = error_funcs[cost_func](y_,y)
    #diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    with tf.name_scope('total'):
      cross_entropy = tf.reduce_mean(diff)
  tf.summary.scalar('cross_entropy', cross_entropy)

  with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(learning_rate=lr).minimize(
        cross_entropy)

  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    with tf.name_scope('accuracy'):
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', accuracy)

  # Merge all the summaries and write them out to
  # netsaver_test/train and netsaver_test/test
  merged = tf.summary.merge_all()
  train_writer = tf.summary.FileWriter('netsaver_test'+ '/train', sess.graph)
  test_writer = tf.summary.FileWriter('netsaver_test' + '/test')
  tf.global_variables_initializer().run()

  # Train the model, and also write summaries.
  # Every bint steps, measure training-set accuracy and write a summary to the
  # test writer; all other steps, run train_step on training data and add training summaries.

  def feed_dict(train):
    """Make a TensorFlow feed_dict: maps data onto Tensor placeholders."""
    if train == "train":
      xs, ys = mnist.train_next_batch(size=mbs)
    elif train == 'test':
      xs, ys = mnist.test_features, mnist.test_labels
    elif train == 'val':
        xs, ys = mnist.validation_features, mnist.validation_labels
    elif train == 'map':
        xs, ys = mnist.train_features[:map_bs], mnist.train_labels[:map_bs]
    else:
        raise ValueError("unknown feed_dict mode: " + str(train))
    return {x: xs, y_: ys}

  for i in range(epochs):
    if i % bint == 0:  # Record summaries and training-set accuracy
      summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict('train'))
      test_writer.add_summary(summary, i)
      print('Accuracy at step %s: %s' % (i, acc))

      # Collect the training accuracy for the matplotlib graph
      train_acc.append([i, acc])

      if acc >= acc_lim: break
    else:  # Record train set summaries, and train
      if i % 100 == 99:  # Record execution stats
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        summary, _ = sess.run([merged, train_step],
                              feed_dict=feed_dict('train'),
                              options=run_options,
                              run_metadata=run_metadata)
        train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
        train_writer.add_summary(summary, i)
        print('Adding run metadata for', i)
      else:  # Record a summary
        summary, _ = sess.run([merged, train_step], feed_dict=feed_dict('train'))
        train_writer.add_summary(summary, i)



  train_writer.close()
  test_writer.close()


  # Display the final training-set accuracy
  summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict('train'))
  print('Final training set accuracy: %s' % acc)


  # Code for displaying graphs

  if show:
    TFT.plot_training_history(train_acc,val_acc)

  if map_layers:
    for l in map_layers:
      _, activation = sess.run([merged, layers[l]], feed_dict=feed_dict('map'))
      TFT.display_matrix(activation, title="mapping of layer: " + str(l))

  if show_layers:
    for variable in tf.trainable_variables():
      if variable.name in show_layers:
        _, values = sess.run([merged, variable], feed_dict=feed_dict('map'))
        if len(values.shape) == 2:
          TFT.display_matrix(values, title="weights of: " + variable.name)
        elif len(values.shape) == 1:
          TFT.display_vector(values, title="biases of: " + variable.name)
        else:
          raise Exception("wrong dimensionality on show layers")

  if dendrogram_layers:
    for l in dendrogram_layers:
      _, activation = sess.run([merged, layers[l]], feed_dict=feed_dict('map'))
      TFT.dendrogram(activation, feed_dict('map')[y_], title="Dendrogram, layer: " + str(l))



  PLT.show()
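
Called with its defaults, `train` runs on the generated wine cases. A typical invocation overriding a few hyperparameters might look like this (the values are arbitrary and purely illustrative):

    train(dims=[11, 30, 6],
          activation_func='relu',
          lr=0.1,
          epochs=2000,
          map_layers=[1],
          dendrogram_layers=None)
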