예제 #1
0
파일: units.py 프로젝트: OwaJawa/morb
 def mean_field_from_activation(self, vmap):
     """Mean-field value: sigmoid of the difference between this unit's
     activation and the flipped units' activation (both read from vmap)."""
     activation_diff = vmap[self] - vmap[self.flipped_units]
     return activation_functions.sigmoid(activation_diff)
예제 #2
0
파일: units.py 프로젝트: OwaJawa/morb
 def success_probability_from_activation(self, vmap):
     """Success probability: elementwise sigmoid of this unit's activation."""
     own_activation = vmap[self]
     return activation_functions.sigmoid(own_activation)
예제 #3
0
파일: units.py 프로젝트: OwaJawa/morb
 def mean_field_from_activation(self, vmap):
     """Mean-field value: elementwise sigmoid of this unit's activation."""
     act = vmap[self]
     return activation_functions.sigmoid(act)
예제 #4
0
파일: units.py 프로젝트: OwaJawa/morb
 def sample_from_activation(self, vmap):
     """Draw a Bernoulli sample whose success probability is the sigmoid of
     (own activation - flipped units' activation)."""
     success_prob = activation_functions.sigmoid(
         vmap[self] - vmap[self.flipped_units])
     return samplers.bernoulli(success_prob)
예제 #5
0
def apply_convolution(args, data_set_x, random_filters={}, normalisation={}):
  ############################################
  # APPLY FIRST LAYER CONVO
  ############################################

  if args.random_filters:
    random_filters_rng = np.random.RandomState(args.random_filters_seed)

  if args.convolution_type == "fullnoborder":
    print "   Removing neighbourhoods:"
    print "     before:            ", data_set_x.shape

    # cut borders
    margin = (data_set_x.shape[2] - pic_w) / 2
    data_set_x = data_set_x[:,:,(margin):(pic_w + margin),(margin):(pic_w + margin)]

    print "     after:             ", data_set_x.shape

  for prev_layer in args.previous_layer:
    print ">> Processing layer: ", prev_layer

    data_set_x_conv_collect = []

    for fname in prev_layer.split(","):
      print "  >> Filter set:   ", fname
      with open(fname, "r") as f:
        prev_layer_params = cPickle.load(f)

        prev_W = prev_layer_params["W"]
        prev_bh = prev_layer_params["bh"]

        print "       prev_W.shape:  ", prev_W.shape
        print "       prev_bh.shape: ", prev_bh.shape

        if args.random_filters:
          print "     Replacing filters by random values..."
          if fname in random_filters:
            prev_W  = random_filters[fname]["prev_W"]
            prev_bh = random_filters[fname]["prev_bh"]
          else:
            prev_W = np.array(random_filters_rng.uniform(low=-1, high=1, size=prev_W.shape), dtype=prev_W.dtype)
            prev_bh = np.array(random_filters_rng.uniform(low=-1, high=1, size=prev_bh.shape), dtype=prev_bh.dtype)
            random_filters[fname] = { "prev_W": prev_W, "prev_bh": prev_bh }

        print "     Compiling convolution..."

        V = T.dtensor4()
        W = T.dtensor4()
        bh = T.dvector()
        W_flipped = W[:, :, ::-1, ::-1]
        subsample = (args.subsample, args.subsample)
        reshaped_bh = bh.dimshuffle('x',0,'x','x')
        if args.convolution_type == "fullnoborder":
          c = conv.conv2d(V, W_flipped, border_mode="valid", subsample=subsample)
        else:
          c = conv.conv2d(V, W_flipped, border_mode="full", subsample=subsample)
        c_act = activation_functions.sigmoid(c + reshaped_bh)
        if args.skip_sigmoid:
          conv_f = theano.function([V, W, bh], [ c ], on_unused_input="ignore")
        else:
          conv_f = theano.function([V, W, bh], [ c_act ])

        print "     Applying convolution..."

        start_time = time.time()
        data_set_x_conv = None
        n_samples = data_set_x.shape[0]
        batch_size = 5
        for i in xrange(0, n_samples, batch_size):
          cvf = conv_f(data_set_x[i:min(i+batch_size, n_samples), :,:,:], prev_W, prev_bh)[0]
          if data_set_x_conv is None:
            s = np.array(cvf.shape)
            s[0] = data_set_x.shape[0]
            data_set_x_conv = np.zeros(s, dtype=cvf.dtype)
          data_set_x_conv[i:min(i+batch_size, n_samples), :,:,:] = cvf
          if i % 10 == 0 and i > 0:
            print "     %d     %0.2f/s" % (i, float(i) / (time.time() - start_time))

        # release
        prev_layer_params = None
        prev_W = None
        prev_bh = None
        conv_f = None
        cvf = None

        print "     After this layer:"
        print "       data_set_x_conv:   ", data_set_x_conv.shape

        data_set_x_conv_collect.append(data_set_x_conv)

        # release
        data_set_x_conv = None

        # garbage collection
        gc.collect()

    if len(data_set_x_conv_collect) == 1:
      data_set_x = data_set_x_conv_collect[0]
    else:
      data_set_x = np.concatenate(data_set_x_conv_collect, axis=1)

    # release
    data_set_x_conv_collect = None

    # garbage collection
    gc.collect()

    print "   After this layer:"
    print "     data_set_x:        ", data_set_x.shape

    if args.global_normalisation:
      # normalise / whiten
      if prev_layer in normalisation:
        mu = normalisation[prev_layer]["mu"]
        sigma = normalisation[prev_layer]["sigma"]
      else:
        print "   Calculating normalisation parameters"
        mu = np.mean(data_set_x)
        sigma = np.std(data_set_x)
        normalisation[prev_layer] = { "mu": mu, "sigma": sigma }
      print "   Normalising layer output..."
      data_set_x -= mu
      data_set_x /= (0.25 * sigma)

    elif not args.skip_normalisation:
      # normalise / whiten
      print "   Normalising layer output..."
      n_samples = data_set_x.shape[0]
      data_set_rows = data_set_x.reshape(n_samples, -1)
      mu = np.mean(data_set_rows, axis=1).reshape(n_samples, 1, 1, 1)
      sigma = memory_efficient_std(data_set_rows).reshape(n_samples, 1, 1, 1)
      # sigma = np.std(data_set_rows, axis=1).reshape(n_samples, 1, 1, 1)
      data_set_x -= mu
      data_set_x /= (0.25 * sigma)
      # release
      data_set_rows = None

  # garbage collection
  gc.collect()


  # cut borders
  if args.convolution_type != "fullnoborder":
    margin = (data_set_x.shape[2] - pic_w) / 2
    data_set_x = data_set_x[:,:,(margin):(pic_w + margin),(margin):(pic_w + margin)]

    print "   After removing borders:"
    print "     data_set_x:        ", data_set_x.shape

  ############################################
  # END OF CONVOLUTION
  ############################################

  return (data_set_x, random_filters, normalisation)
예제 #6
0
 def success_probability_from_activation(self, vmap):
     """Return the success probability: sigmoid of this unit's activation."""
     activation = vmap[self]
     return activation_functions.sigmoid(activation)
예제 #7
0
 def mean_field_from_activation(self, vmap):
     """Mean-field value: sigmoid of (own activation minus the flipped
     units' activation), both looked up in vmap."""
     own = vmap[self]
     flipped = vmap[self.flipped_units]
     return activation_functions.sigmoid(own - flipped)
예제 #8
0
 def sample_from_activation(self, vmap):
     """Bernoulli sample with p = sigmoid(own activation - flipped units')."""
     difference = vmap[self] - vmap[self.flipped_units]
     probability = activation_functions.sigmoid(difference)
     return samplers.bernoulli(probability)
예제 #9
0
 def mean_field_from_activation(self, vmap):
     """Mean-field value: sigmoid applied to this unit's activation."""
     activation = vmap[self]
     return activation_functions.sigmoid(activation)