Example #1
    def __init__(self, data, label, num_features, num_classes):

        hidden1, _, _ = layers.fc(data,
                                  num_features,
                                  100,
                                  name='hidden1',
                                  log_weights=False)
        logits, _, _ = layers.fc(hidden1,
                                 100,
                                 num_classes,
                                 name='logits',
                                 relu=False,
                                 log_weights=False)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=label, logits=logits)
        data_loss = tf.reduce_mean(cross_entropy)
        reg_loss = 1e-3 * tf.losses.get_regularization_loss()
        self._loss = tf.add(data_loss, reg_loss, name='data_and_reg_loss')
        global_step = tf.train.get_or_create_global_step()
        self._optimize = tf.train.RMSPropOptimizer(0.03).minimize(
            self._loss, global_step=global_step)
        self._prediction = tf.nn.softmax(logits)
        mistakes = tf.not_equal(tf.argmax(label, 1),
                                tf.argmax(self._prediction, 1))
        self._error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
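Every snippet on this page calls a project-local layers.fc helper rather than tf.layers.dense, and its implementation is not shown here. Below is a minimal sketch of what such a helper might look like for the call signature used in Example #1 (input tensor, input size, output size, with relu and log_weights flags); the variable names and initializers are assumptions, not the original code:

# Hypothetical sketch of the project-local layers.fc helper used above
# (TensorFlow 1.x style); the real implementation may differ.
import tensorflow as tf

def fc(x, num_in, units, name, relu=True, log_weights=True):
    with tf.variable_scope(name):
        weights = tf.get_variable('weights', shape=[num_in, units],
                                  initializer=tf.glorot_uniform_initializer())
        biases = tf.get_variable('biases', shape=[units],
                                 initializer=tf.zeros_initializer())
        act = tf.nn.xw_plus_b(x, weights, biases)
        if relu:
            act = tf.nn.relu(act)
        if log_weights:
            tf.summary.histogram(name + '/weights', weights)
        return act, weights, biases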
Example #2
def net(data, train=False, data_type=tf.float32):

    pool1 = layers.conv(data, 32, 'conv-1', data_type)
    pool2 = layers.conv(pool1, 64, 'conv-2', data_type)

    pool2_shape = pool2.get_shape().as_list()
    pool_to_fc = [
        pool2_shape[0], pool2_shape[1] * pool2_shape[2] * pool2_shape[3]
    ]
    reshape = tf.reshape(pool2, pool_to_fc)

    fc1, fc1_weights, fc1_biases = layers.fc(reshape, 512, 'fc1', data_type)
    fc1 = tf.nn.relu(fc1)
    if train:
        # keep_prob is assumed to be defined elsewhere in the module
        # (e.g. as a dropout hyperparameter); it is not a local variable here.
        fc1 = layers.dropout(fc1, keep_prob)
    # tf.summary.histogram("fc1/relu", fc1)
    fc2_logits, fc2_weights, fc2_biases = layers.fc(fc1, 10, 'fc2', data_type)

    return [fc2_logits, fc1_weights, fc1_biases, fc2_weights, fc2_biases]
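A possible way to wire the net() function from Example #2 into a TF1 graph; the placeholder shapes (MNIST-style 28x28 grayscale batches) are assumptions for illustration only:

# Hypothetical usage of net() above; shapes and loss are illustrative.
data = tf.placeholder(tf.float32, shape=[64, 28, 28, 1])
labels = tf.placeholder(tf.int64, shape=[64])
logits, *params = net(data, train=False)  # train=True also requires keep_prob to be defined
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))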
Example #3
def net(data, train=False, data_type=tf.float16):
  # Note: pool1/pool2 are computed but not used below; the fully connected
  # stack is fed directly from the (reshaped) input data.
  pool1 = layers.conv(data, 32, 'conv-1')
  pool2 = layers.conv(pool1, 64, 'conv-2')

  data_shape = data.get_shape().as_list()
  data_to_fc = [data_shape[0], data_shape[1] * data_shape[2] * data_shape[3]]
  reshape = tf.reshape(data, data_to_fc)
  
  fc1, fc1_weights, fc1_biases = layers.fc(reshape, 512, 'fc1', data_type)
  # fc1 = tf.nn.relu(fc1)
  fc2, fc2_weights, fc2_biases = layers.fc(fc1, 512//2, 'fc2', data_type)
  # fc2 = tf.nn.relu(fc2)
  fc3, fc3_weights, fc3_biases = layers.fc(fc2, 512//4, 'fc3', data_type)
  # fc3 = tf.nn.relu(fc3)
  fc4, fc4_weights, fc4_biases = layers.fc(fc3, 10, 'fc4', data_type)

  # if train:
  #   fc1 = layers.dropout(fc1, keep_prob)
  #tf.summary.histogram("fc1/relu", fc1)

  return [fc4,
          fc1_weights, fc1_biases,
          fc2_weights, fc2_biases,
          fc3_weights, fc3_biases,
          fc4_weights, fc4_biases]
Example #4
    def prediction(self):
        if self._prediction is None:
            data_dim = int(self.data.shape[1])
            layer_name = 'logits'
            fc1l, fc1w, fc1b = layers.fc(self.data,
                                         num_in=data_dim,
                                         units=5,
                                         name=layer_name,
                                         relu=False)
            tf.summary.histogram(layer_name, fc1l)
            self._prediction = fc1l

        return self._prediction
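This and the next example implement prediction() as a lazily built graph: the layers are created on first access and cached in self._prediction. A minimal sketch of the surrounding class these snippets assume (attribute names are guesses based on how they are used):

# Hypothetical host class for the lazy prediction() method above.
class Model:
    def __init__(self, data):
        self.data = data          # input tensor, shape [batch, features]
        self.parameters = []      # weight/bias handles collected by prediction()
        self._prediction = None   # built on first call to prediction()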
Example #5
    def prediction(self):
        if self._prediction is None:
            data_dim = self.data.get_shape()[1].value
            layer_name = 'fc1'
            fc1l, fc1w, fc1b = layers.fc(self.data,
                                         num_in=data_dim,
                                         units=80,
                                         name=layer_name,
                                         relu=True)
            self.parameters += [fc1w, fc1b]

            layer_name = 'fc2'
            fc2l, fc2w, fc2b = layers.fc(fc1l,
                                         num_in=fc1l.get_shape()[1].value,
                                         units=80,
                                         name=layer_name,
                                         relu=True)
            self.parameters += [fc2w, fc2b]

            layer_name = 'fc3'
            fc3l, fc3w, fc3b = layers.fc(fc2l,
                                         num_in=fc2l.get_shape()[1].value,
                                         units=80,
                                         name=layer_name,
                                         relu=True)
            self.parameters += [fc3w, fc3b]

            layer_name = 'fc4'
            fc4l, fc4w, fc4b = layers.fc(fc3l,
                                         num_in=fc3l.get_shape()[1].value,
                                         units=80,
                                         name=layer_name,
                                         relu=True)
            self.parameters += [fc4w, fc4b]

            layer_name = 'fc5'
            fc5l, fc5w, fc5b = layers.fc(fc4l,
                                         num_in=fc4l.get_shape()[1].value,
                                         units=80,
                                         name=layer_name,
                                         relu=True)
            self.parameters += [fc5w, fc5b]

            layer_name = 'fc6'
            fc6l, fc6w, fc6b = layers.fc(fc5l,
                                         num_in=fc5l.get_shape()[1].value,
                                         units=80,
                                         name=layer_name,
                                         relu=True)
            self.parameters += [fc6w, fc6b]

            layer_name = 'fc7'
            fc7l, fc7w, fc7b = layers.fc(fc6l,
                                         num_in=fc6l.get_shape()[1].value,
                                         units=80,
                                         name=layer_name,
                                         relu=True)
            self.parameters += [fc7w, fc7b]

            layer_name = 'fc8'
            fc8l, fc8w, fc8b = layers.fc(fc7l,
                                         num_in=fc7l.get_shape()[1].value,
                                         units=10,
                                         name=layer_name,
                                         relu=False)
            self.parameters += [fc8w, fc8b]

            self._prediction = fc8l

        return self._prediction
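The eight near-identical layers.fc calls in Example #5 could also be generated in a loop. A compact equivalent, assuming the same layers.fc signature used above:

# Compact equivalent of the fc1..fc8 stack above (behaviour should match).
x = self.data
for i, (units, relu) in enumerate([(80, True)] * 7 + [(10, False)], start=1):
    x, w, b = layers.fc(x,
                        num_in=x.get_shape()[1].value,
                        units=units,
                        name='fc%d' % i,
                        relu=relu)
    self.parameters += [w, b]
self._prediction = x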
Example #6
    def make_coefficient_params(self, lowres):
        # splat params
        splat = []
        in_channels = self.n_in
        num_downsamples = int(np.log2(min(lowres) / self.spatial_bin))
        extra_convs = max(0, int(np.log2(self.spatial_bin) - np.log2(16)))
        extra_convs = np.linspace(0,
                                  num_downsamples - 1,
                                  extra_convs,
                                  dtype=int).tolist()  # np.int was removed in NumPy >= 1.24
        for i in range(num_downsamples):
            out_channels = (2**i) * self.feature_multiplier
            splat.append(
                conv(in_channels,
                     out_channels,
                     3,
                     stride=2,
                     norm=False if i == 0 else self.norm))
            if i in extra_convs:
                splat.append(
                    conv(out_channels, out_channels, 3, norm=self.norm))
            in_channels = out_channels
        splat = nn.Sequential(*splat)
        splat_channels = in_channels

        # global params
        global_conv = []
        in_channels = splat_channels
        for _ in range(int(np.log2(self.spatial_bin / 4))):
            global_conv.append(
                conv(in_channels,
                     8 * self.feature_multiplier,
                     3,
                     stride=2,
                     norm=self.norm))
            in_channels = 8 * self.feature_multiplier
        global_conv.append(nn.AdaptiveAvgPool2d(4))
        global_conv = nn.Sequential(*global_conv)
        global_fc = nn.Sequential(
            fc(128 * self.feature_multiplier,
               32 * self.feature_multiplier,
               norm=self.norm),
            fc(32 * self.feature_multiplier,
               16 * self.feature_multiplier,
               norm=self.norm),
            fc(16 * self.feature_multiplier,
               8 * self.feature_multiplier,
               norm=False,
               relu=False))

        # local params
        local = nn.Sequential(
            conv(splat_channels, 8 * self.feature_multiplier, 3),
            conv(8 * self.feature_multiplier,
                 8 * self.feature_multiplier,
                 3,
                 bias=False,
                 norm=False,
                 relu=False))

        # prediction params
        prediction = conv(8 * self.feature_multiplier,
                          self.luma_bins * (self.n_in + 1) * self.n_out,
                          1,
                          norm=False,
                          relu=False)

        coefficient_params = nn.Module()
        coefficient_params.splat = splat
        coefficient_params.global_conv = global_conv
        coefficient_params.global_fc = global_fc
        coefficient_params.local = local
        coefficient_params.prediction = prediction
        return coefficient_params
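The PyTorch snippet above relies on project-local conv and fc builders that return small nn.Sequential blocks. A rough sketch of what they might look like, inferred from the call sites; the real helpers (padding, choice of norm layer, activation) may differ:

# Hypothetical conv/fc builders matching the call signatures used above.
import torch.nn as nn

def conv(in_ch, out_ch, kernel, stride=1, bias=True, norm=False, relu=True):
    block = [nn.Conv2d(in_ch, out_ch, kernel, stride=stride,
                       padding=kernel // 2, bias=bias)]
    if norm:
        block.append(nn.BatchNorm2d(out_ch))
    if relu:
        block.append(nn.ReLU(inplace=True))
    return nn.Sequential(*block)

def fc(in_features, out_features, norm=False, relu=True):
    block = [nn.Linear(in_features, out_features)]
    if norm:
        block.append(nn.BatchNorm1d(out_features))
    if relu:
        block.append(nn.ReLU(inplace=True))
    return nn.Sequential(*block)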
Example #7
def net(data, train=False, data_type=tf.float16):
  # reshapeData and n_size are assumed to be defined elsewhere in the module;
  # layers.rec presumably builds a recurrent stack and returns its per-step outputs.
  rec = layers.rec(reshapeData(data), n_size, data_type)
  fc1, fc1_weights, fc1_biases = layers.fc(rec[-1], 10, 'fc1', data_type)

  return [fc1, fc1_weights, fc1_biases]