Code example #1
0
def get_model(sparse_data, train_labels, is_training, tensor_in_sizes, num_classes = 10, scope = "mn16-", initializer = None, regularizer = None):
  """Build the small "mn16" sparse 3D conv-net and its training loss.

  Args:
    sparse_data: sparse input tensor consumed by the sparse conv stack.
    train_labels: one-hot labels for the softmax cross-entropy loss.
    is_training: bool tensor toggling dropout.
    tensor_in_sizes: dense shape of the input, batch size first.
    num_classes: number of output classes.
    scope: name prefix for all sparse conv layers.
    initializer: weight initializer forwarded to the sparse conv layers.
    regularizer: unused here; kept for interface parity with sibling models.

  Returns:
    [mean softmax cross-entropy loss, class probabilities, ops list].
  """
  strides = [1, 1, 1, 1, 1]
  padding = "SAME"
  dim = 5
  pooling_sizes = [1, 2, 2, 2, 1]
  batch_size = tensor_in_sizes[0]
  # Elements per sample: product of every input dimension except the batch axis.
  total_size = 1
  for extent in tensor_in_sizes[1:]:
    total_size *= extent
  sd_converted = ld.create_sparse_data_to_direct_sparse(sparse_data, dim)
  ops = [None] * 6
  sizes = tensor_in_sizes
  # First conv group at full resolution.
  d1 = 0.33
  net, sizes = ld.create_sparse_conv_layer(sd_converted, [3, 3, 3, 1, 8], sizes, strides, padding, dim, d1, "K-RELU", name=scope + "sc1", initializer=initializer)
  net, sizes = ld.create_sparse_conv_layer(net, [3, 3, 3, 8, 8], sizes, strides, padding, dim, d1, "K-RELU", name=scope + "sc2", initializer=initializer)
  net, sizes = ld.create_sparse_conv_layer(net, [3, 3, 3, 8, 8], sizes, strides, padding, dim, d1, "K-RELU", name=scope + "sc3", initializer=initializer)
  net, sizes = ld.create_sparse_pooling_layer(net, pooling_sizes, sizes, dim, 1)
  # Second conv group after pooling; the last layer uses the K-ABS activation.
  d2 = 0.33
  net, sizes = ld.create_sparse_conv_layer(net, [3, 3, 3, 8, 16], sizes, strides, padding, dim, d2, "K-RELU", name=scope + "sc4", initializer=initializer)
  net, sizes = ld.create_sparse_conv_layer(net, [3, 3, 3, 16, 16], sizes, strides, padding, dim, d2, "K-RELU", name=scope + "sc5", initializer=initializer)
  net, sizes = ld.create_sparse_conv_layer(net, [3, 3, 3, 16, 16], sizes, strides, padding, dim, d2, "K-ABS", name=scope + "sc6", initializer=initializer)
  dense = ld.create_direct_sparse_to_dense(net, dim)
  # NOTE(review): the 2x factor presumably reflects 8x spatial pooling vs. 16x
  # channel growth relative to the single-channel input — confirm against `ld`.
  flat = tf.reshape(dense, [batch_size, total_size * 2])
  dropped = tf.layers.dropout(flat, 0.5, name="dropout", training=is_training)
  fc_hidden = tf.layers.dense(dropped, 1024, name="dense2")
  logits = tf.layers.dense(fc_hidden, num_classes, name="dense1")
  sd_out = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=train_labels, name="softmax_loss"))
  p_sd_out = tf.nn.softmax(logits=logits)
  return [sd_out, p_sd_out, ops]
Code example #2
0
def get_model(sparse_data, train_labels, is_training, tensor_in_sizes, num_classes = 10, scope = "mn128-", initializer = None, regularizer = None):
  """Build the "mn128" model: a sparse 3D conv stack followed by dense 3D convs.

  Args:
    sparse_data: sparse input tensor consumed by the sparse conv stack.
    train_labels: one-hot labels for the softmax cross-entropy loss.
    is_training: bool tensor toggling dropout.
    tensor_in_sizes: dense shape of the input, batch size first.
    num_classes: number of output classes.
    scope: name prefix for the conv layers.
    initializer: weight initializer forwarded to all conv layers.
    regularizer: kernel regularizer applied to the dense 3D conv layers.

  Returns:
    [mean softmax cross-entropy loss, class probabilities, ops list].
  """
  strides = [1,1,1,1,1]
  padding = "SAME"
  dim = 5
  pooling_sizes = [1,2,2,2,1]
  dpooling_sizes = [2,2,2]
  batch_size = tensor_in_sizes[0]
  ops = [None]*9
  sd_converted = ld.create_sparse_data_to_direct_sparse(sparse_data, dim)
  # Sparse conv groups; d1/d2/d3 are per-group density bounds, and each pooling
  # step raises the allowed density as the volume shrinks.
  d1 = 0.02
  net, tensor_in_sizes = ld.create_sparse_conv_layer(sd_converted, [3,3,3,1,8], tensor_in_sizes, strides, padding, dim, d1, "K-RELU", name = scope + "sc1", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,8,8], tensor_in_sizes, strides, padding, dim, d1, "K-RELU", name = scope + "sc2", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,8,8], tensor_in_sizes, strides, padding, dim, d1, "K-RELU", name = scope + "sc3", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_pooling_layer(net, pooling_sizes, tensor_in_sizes, dim, 0.06)
  d2 = 0.06
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,8,16], tensor_in_sizes, strides, padding, dim, d2, "K-RELU", name = scope + "sc4", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,16,16], tensor_in_sizes, strides, padding, dim, d2, "K-RELU", name = scope + "sc5", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,16,16], tensor_in_sizes, strides, padding, dim, d2, "K-RELU", name = scope + "sc6", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_pooling_layer(net, pooling_sizes, tensor_in_sizes, dim, 0.18)
  d3 = 0.14
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,16,24], tensor_in_sizes, strides, padding, dim, d3, "K-RELU", name = scope + "sc7", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,24,24], tensor_in_sizes, strides, padding, dim, d3, "K-RELU", name = scope + "sc8", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,24,24], tensor_in_sizes, strides, padding, dim, d3, "K-RELU", name = scope + "sc9", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_pooling_layer(net, pooling_sizes, tensor_in_sizes, dim, 0.50)
  net = ld.create_direct_sparse_to_dense(net, dim)
  net = tf.reshape(net, [batch_size, 16, 16, 16, 24])
  #dense layers
  net = tf.layers.conv3d(inputs=net, filters=32, kernel_size=[3, 3, 3], padding="same", activation=tf.nn.relu, name = scope + "sc10", kernel_initializer=initializer, kernel_regularizer=regularizer)
  net = tf.layers.conv3d(inputs=net, filters=32, kernel_size=[3, 3, 3], padding="same", activation=tf.nn.relu, name = scope + "sc11", kernel_initializer=initializer, kernel_regularizer=regularizer)
  net = tf.layers.conv3d(inputs=net, filters=32, kernel_size=[3, 3, 3], padding="same", activation=tf.nn.relu, name = scope + "sc12", kernel_initializer=initializer, kernel_regularizer=regularizer)
  net = tf.layers.max_pooling3d(inputs=net, pool_size=dpooling_sizes, strides=2, padding="same", name="dp1")
  net = tf.layers.conv3d(inputs=net, filters=40, kernel_size=[3, 3, 3], padding="same", activation=tf.nn.relu, name = scope + "sc13", kernel_initializer=initializer, kernel_regularizer=regularizer)
  net = tf.layers.conv3d(inputs=net, filters=40, kernel_size=[3, 3, 3], padding="same", activation=tf.nn.relu, name = scope + "sc14", kernel_initializer=initializer, kernel_regularizer=regularizer)
  # sc15 is linear (no activation) on purpose: it feeds the classifier head.
  net = tf.layers.conv3d(inputs=net, filters=40, kernel_size=[3, 3, 3], padding="same", name = scope + "sc15")
  net =  tf.layers.dropout(net, 0.5, name="dropout", training=is_training)
  net = tf.reshape(net, [batch_size, -1])
  net = tf.layers.dense(net, 1024)
  net = tf.layers.dense(net, num_classes)
  sd_out = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=train_labels, name = "softmax_loss"))
  p_sd_out = tf.nn.softmax(logits=net)
  return [sd_out, p_sd_out, ops]
Code example #3
0
def get_model(sparse_data, train_labels, is_training, tensor_in_sizes, num_classes = 10, scope = "mn128-", initializer = None, regularizer = None):
  """Build the fully-sparse "mn128" model variant (no dense 3D conv tail).

  Args:
    sparse_data: sparse input tensor consumed by the sparse conv stack.
    train_labels: one-hot labels for the softmax cross-entropy loss.
    is_training: bool tensor toggling dropout.
    tensor_in_sizes: dense shape of the input, batch size first.
    num_classes: number of output classes.
    scope: name prefix for all sparse conv layers.
    initializer: weight initializer forwarded to the sparse conv layers.
    regularizer: unused here; kept for interface parity with sibling models.

  Returns:
    [mean softmax cross-entropy loss, class probabilities, ops list].
  """
  strides = [1,1,1,1,1]
  padding = "SAME"
  dim = 5
  pooling_sizes = [1,2,2,2,1]
  dpooling_sizes = [2,2,2]
  batch_size = tensor_in_sizes[0]
  ops = [None]*9
  sd_converted = ld.create_sparse_data_to_direct_sparse(sparse_data, dim)
  # Per-group density bounds; each pooling step allows up to 6x the conv
  # density (capped at fully dense).
  d1 = 0.14
  net, tensor_in_sizes = ld.create_sparse_conv_layer(sd_converted, [3,3,3,1,8], tensor_in_sizes, strides, padding, dim, d1, "K-RELU", name = scope + "sc1", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,8,8], tensor_in_sizes, strides, padding, dim, d1, "K-RELU", name = scope + "sc2", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,8,8], tensor_in_sizes, strides, padding, dim, d1, "K-RELU", name = scope + "sc3", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_pooling_layer(net, pooling_sizes, tensor_in_sizes, dim, min(1, 6 * d1))
  d2 = 0.33
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,8,16], tensor_in_sizes, strides, padding, dim, d2, "K-RELU", name = scope + "sc4", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,16,16], tensor_in_sizes, strides, padding, dim, d2, "K-RELU", name = scope + "sc5", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,16,16], tensor_in_sizes, strides, padding, dim, d2, "K-RELU", name = scope + "sc6", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_pooling_layer(net, pooling_sizes, tensor_in_sizes, dim, min(1, 6 * d2))
  d3 = 0.66
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,16,24], tensor_in_sizes, strides, padding, dim, d3, "K-RELU", name = scope + "sc7", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,24,24], tensor_in_sizes, strides, padding, dim, d3, "K-RELU", name = scope + "sc8", initializer=initializer)
  net, tensor_in_sizes = ld.create_sparse_conv_layer(net, [3,3,3,24,24], tensor_in_sizes, strides, padding, dim, d3, "K-ABS", name = scope + "sc9", initializer=initializer)
  net = ld.create_direct_sparse_to_dense(net, dim)
  net = tf.reshape(net, [batch_size, 8, 8, 8, 24])
  #dense layers
  net =  tf.layers.dropout(net, 0.5, name="dropout", training=is_training)
  net = tf.reshape(net, [batch_size, -1])
  net = tf.layers.dense(net, 1024)
  net = tf.layers.dense(net, num_classes)
  sd_out = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=train_labels, name = "softmax_loss"))
  p_sd_out = tf.nn.softmax(logits=net)
  return [sd_out, p_sd_out, ops]
Code example #4
0
def model_mnist(sparse_data,
                tensor_in_sizes,
                train_labels=None,
                num_classes=10,
                scope='mn256-',
                initializer=None,
                d1=0.1,
                d2=0.3,
                d3=0.4,
                rscale=0.02,
                max_bias=0.1):
    """Build the regularized sparse-conv MNIST model.

    Args:
        sparse_data: sparse input tensor consumed by the sparse conv stack.
        tensor_in_sizes: dense shape of the input, batch size first.
        train_labels: one-hot labels; required to build loss and accuracy.
        num_classes: number of output classes.
        scope: name prefix for all sparse conv layers.
        initializer: weight initializer forwarded to the conv layers.
        d1, d2, d3: density bounds for conv group 1, group 2, and pooling.
        rscale: regularization scale passed to each regularized conv layer.
        max_bias: bias offset bound passed to each regularized conv layer.

    Returns:
        (loss, predictions dict, accuracy metric, net dict, ops list).
    """
    dim = 5
    strides = [1, 1, 1, 1, 1]
    padding = 'SAME'
    pooling_sizes = [1, 1, 2, 2, 1]
    batch_size = tensor_in_sizes[0]

    net = {}
    # One regularization op per conv layer, filled in below.
    ops = [None] * 6

    tmp_tin = tensor_in_sizes

    net['sd_converted'] = ld.create_sparse_data_to_direct_sparse(
        sparse_data, dim)
    net['conv1_1'], tmp_tin, ops[0] = ld.create_sparse_conv_layer_reg(
        net['sd_converted'], [1, 3, 3, 1, 8],
        tmp_tin,
        strides,
        padding,
        dim,
        d1,
        'K-RELU',
        name=scope + 'sc1',
        initializer=initializer,
        scale=rscale,
        bias_offset=max_bias)
    net['conv1_2'], tmp_tin, ops[1] = ld.create_sparse_conv_layer_reg(
        net['conv1_1'], [1, 3, 3, 8, 8],
        tmp_tin,
        strides,
        padding,
        dim,
        d1,
        'K-RELU',
        name=scope + 'sc2',
        initializer=initializer,
        scale=rscale,
        bias_offset=max_bias)
    net['conv1_3'], tmp_tin, ops[2] = ld.create_sparse_conv_layer_reg(
        net['conv1_2'], [1, 3, 3, 8, 8],
        tmp_tin,
        strides,
        padding,
        dim,
        d1,
        'K-RELU',
        name=scope + 'sc3',
        initializer=initializer,
        scale=rscale,
        bias_offset=max_bias)
    net['pool1'], tmp_tin = ld.create_sparse_pooling_layer(
        net['conv1_3'], pooling_sizes, tmp_tin, dim, d3)
    net['conv2_1'], tmp_tin, ops[3] = ld.create_sparse_conv_layer_reg(
        net['pool1'], [1, 3, 3, 8, 16],
        tmp_tin,
        strides,
        padding,
        dim,
        d2,
        'K-RELU',
        name=scope + 'sc4',
        initializer=initializer,
        scale=rscale,
        bias_offset=max_bias)
    net['conv2_2'], tmp_tin, ops[4] = ld.create_sparse_conv_layer_reg(
        net['conv2_1'], [1, 3, 3, 16, 16],
        tmp_tin,
        strides,
        padding,
        dim,
        d2,
        'K-RELU',
        name=scope + 'sc5',
        initializer=initializer,
        scale=rscale,
        bias_offset=max_bias)
    # Last sparse conv uses K-ABS before the dense head.
    net['conv2_3'], tmp_tin, ops[5] = ld.create_sparse_conv_layer_reg(
        net['conv2_2'], [1, 3, 3, 16, 16],
        tmp_tin,
        strides,
        padding,
        dim,
        d2,
        'K-ABS',
        name=scope + 'sc6',
        initializer=initializer,
        scale=rscale,
        bias_offset=max_bias)
    net['sparse_to_dense'] = ld.create_direct_sparse_to_dense(
        net['conv2_3'], dim)
    net['dense_reshaped1'] = tf.reshape(net['sparse_to_dense'],
                                        [batch_size, 1, 14, 14, 16])
    net['dense_reshaped2'] = tf.reshape(net['dense_reshaped1'],
                                        [batch_size, -1])
    net['dense1'] = tf.layers.dense(net['dense_reshaped2'], 512)
    net['dense2'] = tf.layers.dense(net['dense1'], num_classes)

    predictions = {
        'classes': tf.argmax(net['dense2'], axis=1),
        'probabilities': tf.nn.softmax(net['dense2'])
    }

    loss = tf.losses.softmax_cross_entropy(onehot_labels=train_labels,
                                           logits=net['dense2'])

    # Add the collected weight-regularization terms to the data loss.
    loss += tf.reduce_sum(tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES))
    accuracy = tf.metrics.accuracy(tf.argmax(train_labels, axis=1),
                                   predictions['classes'])

    return loss, predictions, accuracy, net, ops
Code example #5
0
def model_mnist(sparse_data,
                tensor_in_sizes,
                train_labels=None,
                num_classes=10,
                scope='mn256-',
                initializer=None,
                d1=0.1,
                d2=0.3,
                d3=0.4):
    """Build the unregularized sparse-conv MNIST model.

    Args:
        sparse_data: sparse input tensor consumed by the sparse conv stack.
        tensor_in_sizes: dense shape of the input, batch size first.
        train_labels: one-hot labels; required to build loss and accuracy.
        num_classes: number of output classes.
        scope: name prefix for all sparse conv layers.
        initializer: weight initializer forwarded to the conv layers.
        d1, d2, d3: density bounds for conv group 1, group 2, and pooling.

    Returns:
        (loss, predictions dict, accuracy metric, net dict).
    """
    dim = 5
    strides = [1, 1, 1, 1, 1]
    padding = 'SAME'
    pooling_sizes = [1, 1, 2, 2, 1]
    batch_size = tensor_in_sizes[0]

    net = {}

    net['sd_converted'] = ld.create_sparse_data_to_direct_sparse(
        sparse_data, dim)
    net['conv1_1'] = ld.create_sparse_conv_layer(net['sd_converted'],
                                                 [1, 3, 3, 1, 8],
                                                 strides,
                                                 padding,
                                                 dim,
                                                 d1,
                                                 'K-ABS',
                                                 name=scope + 'sc1',
                                                 initializer=initializer)
    net['conv1_2'] = ld.create_sparse_conv_layer(net['conv1_1'],
                                                 [1, 3, 3, 8, 8],
                                                 strides,
                                                 padding,
                                                 dim,
                                                 d1,
                                                 'K-RELU',
                                                 name=scope + 'sc2',
                                                 initializer=initializer)
    net['conv1_3'] = ld.create_sparse_conv_layer(net['conv1_2'],
                                                 [1, 3, 3, 8, 8],
                                                 strides,
                                                 padding,
                                                 dim,
                                                 d1,
                                                 'K-RELU',
                                                 name=scope + 'sc3',
                                                 initializer=initializer)
    net['pool1'] = ld.create_sparse_pooling_layer(net['conv1_3'],
                                                  pooling_sizes, dim, d3)
    net['conv2_1'] = ld.create_sparse_conv_layer(net['pool1'],
                                                 [1, 3, 3, 8, 16],
                                                 strides,
                                                 padding,
                                                 dim,
                                                 d2,
                                                 'K-ABS',
                                                 name=scope + 'sc4',
                                                 initializer=initializer)
    net['conv2_2'] = ld.create_sparse_conv_layer(net['conv2_1'],
                                                 [1, 3, 3, 16, 16],
                                                 strides,
                                                 padding,
                                                 dim,
                                                 d2,
                                                 'K-RELU',
                                                 name=scope + 'sc5',
                                                 initializer=initializer)
    net['conv2_3'] = ld.create_sparse_conv_layer(net['conv2_2'],
                                                 [1, 3, 3, 16, 16],
                                                 strides,
                                                 padding,
                                                 dim,
                                                 d2,
                                                 'K-ABS',
                                                 name=scope + 'sc6',
                                                 initializer=initializer)
    net['sparse_to_dense'] = ld.create_direct_sparse_to_dense(
        net['conv2_3'], dim)
    net['dense_reshaped1'] = tf.reshape(net['sparse_to_dense'],
                                        [batch_size, 1, 14, 14, 16])
    net['dense_reshaped2'] = tf.reshape(net['dense_reshaped1'],
                                        [batch_size, -1])
    net['dense1'] = tf.layers.dense(net['dense_reshaped2'], 512)
    net['dense2'] = tf.layers.dense(net['dense1'], num_classes)

    predictions = {
        'classes': tf.argmax(net['dense2'], axis=1),
        'probabilities': tf.nn.softmax(net['dense2'])
    }

    # BUG FIX: the logits were previously wrapped in
    # tf.clip_by_value(net['dense2'], 1e-7, 1e8), which forces every logit to
    # be >= 1e-7. That erases all negative logits (collapsing class scores)
    # and breaks the gradient through clipped values. Clipping to a tiny
    # positive floor is a probability-space trick and must not be applied to
    # logits; softmax_cross_entropy is numerically stable on raw logits.
    loss = tf.losses.softmax_cross_entropy(onehot_labels=train_labels,
                                           logits=net['dense2'])

    accuracy = tf.metrics.accuracy(tf.argmax(train_labels, axis=1),
                                   predictions['classes'])

    return loss, predictions, accuracy, net